diff --git a/Jenkinsfile b/Jenkinsfile index 0f99c42cc4aa..fc11bb3299ea 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -112,7 +112,7 @@ def sonarAnalysis(args) { """+args+" -DskipTests -Dit.skipTests -Dmaven.javadoc.skip" sh """#!/bin/bash -e - sw java 11 && . /etc/profile.d/java.sh + sw java 17 && . /etc/profile.d/java.sh export MAVEN_OPTS=-Xmx5G """+mvnCmd } @@ -121,7 +121,7 @@ def sonarAnalysis(args) { def hdbPodTemplate(closure) { podTemplate( containers: [ - containerTemplate(name: 'hdb', image: 'kgyrtkirk/hive-dev-box:executor', ttyEnabled: true, command: 'tini -- cat', + containerTemplate(name: 'hdb', image: 'wecharyu/hive-dev-box:executor', ttyEnabled: true, command: 'tini -- cat', alwaysPullImage: true, resourceRequestCpu: '1800m', resourceLimitCpu: '8000m', @@ -287,7 +287,6 @@ set -x echo 127.0.0.1 dev_$dbType | sudo tee -a /etc/hosts . /etc/profile.d/confs.sh sw hive-dev $PWD -ping -c2 dev_$dbType export DOCKER_NETWORK=host export DBNAME=metastore reinit_metastore $dbType diff --git a/README.md b/README.md index c31d6108292b..c5f1d18663fa 100644 --- a/README.md +++ b/README.md @@ -24,10 +24,10 @@ subqueries, common table expressions, and more. Hive's SQL can also be extended with user code via user defined functions (UDFs), user defined aggregates (UDAFs), and user defined table functions (UDTFs). -Hive users have a choice of 3 runtimes when executing SQL queries. -Users can choose between Apache Hadoop MapReduce or Apache Tez -frameworks as their execution backend. MapReduce is a -mature framework that is proven at large scales. However, MapReduce +Hive users can choose between the Apache Hadoop MapReduce and Apache Tez +frameworks as their execution backend. Note that the MapReduce framework +has been deprecated since Hive 2, and Apache Tez is recommended. MapReduce +is a mature framework that is proven at large scales. However, MapReduce is a purely batch framework, and queries using it may experience higher latencies (tens of seconds), even over small datasets. Apache Tez is designed for interactive query, and has substantially reduced diff --git a/accumulo-handler/pom.xml b/accumulo-handler/pom.xml index 255c9355bae5..0641ef3ddcb0 100644 --- a/accumulo-handler/pom.xml +++ b/accumulo-handler/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-accumulo-handler diff --git a/beeline/pom.xml b/beeline/pom.xml index 71da41a32da8..af67b00f0d7f 100644 --- a/beeline/pom.xml +++ b/beeline/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-beeline @@ -243,14 +243,12 @@ shade - - jar-with-dependencies - + false jar-with-dependencies - org.openjdk.jmh.Main + org.apache.hive.beeline.BeeLine diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLine.java b/beeline/src/java/org/apache/hive/beeline/BeeLine.java index 49b5fad11be7..33d13013049d 100644 --- a/beeline/src/java/org/apache/hive/beeline/BeeLine.java +++ b/beeline/src/java/org/apache/hive/beeline/BeeLine.java @@ -1831,7 +1831,7 @@ String getPromptForCli() { String prompt; // read prompt configuration and substitute variables. 
HiveConf conf = getCommands().getHiveConf(true); - prompt = conf.getVar(HiveConf.ConfVars.CLIPROMPT); + prompt = conf.getVar(HiveConf.ConfVars.CLI_PROMPT); prompt = getCommands().substituteVariables(conf, prompt); return prompt + getFormattedDb() + "> "; } diff --git a/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java b/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java index 17af4e2cd714..04ebab7df2e4 100644 --- a/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java +++ b/beeline/src/java/org/apache/hive/beeline/BeeLineOpts.java @@ -291,7 +291,7 @@ public void updateBeeLineOptsFromConf() { if (conf == null) { conf = beeLine.getCommands().getHiveConf(false); } - setForce(HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIIGNOREERRORS)); + setForce(HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLI_IGNORE_ERRORS)); } } @@ -529,7 +529,7 @@ public boolean getShowDbInPrompt() { return showDbInPrompt; } else { HiveConf conf = beeLine.getCommands().getHiveConf(true); - return HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIPRINTCURRENTDB); + return HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLI_PRINT_CURRENT_DB); } } diff --git a/beeline/src/test/org/apache/hive/beeline/cli/TestHiveCli.java b/beeline/src/test/org/apache/hive/beeline/cli/TestHiveCli.java index 5ea4d11b7abd..048ca59becb0 100644 --- a/beeline/src/test/org/apache/hive/beeline/cli/TestHiveCli.java +++ b/beeline/src/test/org/apache/hive/beeline/cli/TestHiveCli.java @@ -280,7 +280,7 @@ private void verifyCMD(String CMD, String keywords, OutputStream os, String[] op public static void init(){ // something changed scratch dir permissions, so test can't execute HiveConf hiveConf = new HiveConf(); - String scratchDir = hiveConf.get(HiveConf.ConfVars.SCRATCHDIR.varname); + String scratchDir = hiveConf.get(HiveConf.ConfVars.SCRATCH_DIR.varname); File file = new File(scratchDir); if (file.exists()) { file.setWritable(true, false); diff --git a/classification/pom.xml b/classification/pom.xml index 1a5f5c08a939..1152f318a041 100644 --- a/classification/pom.xml +++ b/classification/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-classification diff --git a/cli/pom.xml b/cli/pom.xml index bec5507ffa4f..b24ca6cc8261 100644 --- a/cli/pom.xml +++ b/cli/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-cli diff --git a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java index d0d37b506ab2..93e06208e1ee 100644 --- a/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java +++ b/cli/src/java/org/apache/hadoop/hive/cli/CliDriver.java @@ -425,7 +425,7 @@ public void handle(Signal signal) { ret = processCmd(command.toString()); lastRet = ret; } catch (CommandProcessorException e) { - boolean ignoreErrors = HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIIGNOREERRORS); + boolean ignoreErrors = HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLI_IGNORE_ERRORS); if (!ignoreErrors) { throw e; } @@ -773,7 +773,7 @@ public int run(String[] args) throws Exception { } // read prompt configuration and substitute variables. 
- prompt = conf.getVar(HiveConf.ConfVars.CLIPROMPT); + prompt = conf.getVar(HiveConf.ConfVars.CLI_PROMPT); prompt = new VariableSubstitution(new HiveVariableSource() { @Override public Map getHiveVariable() { @@ -809,6 +809,7 @@ public Map getHiveVariable() { } catch (CommandProcessorException e) { return e.getResponseCode(); } finally { + SessionState.endStart(ss); ss.resetThreadName(); ss.close(); } @@ -936,7 +937,7 @@ protected void setupConsoleReader() throws IOException { * @return String to show user for current db value */ private static String getFormattedDb(HiveConf conf, CliSessionState ss) { - if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLIPRINTCURRENTDB)) { + if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.CLI_PRINT_CURRENT_DB)) { return ""; } //BUG: This will not work in remote mode - HIVE-5153 diff --git a/common/pom.xml b/common/pom.xml index 118c969ec45d..f3b87e8081c4 100644 --- a/common/pom.xml +++ b/common/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-common diff --git a/common/src/java/org/apache/hadoop/hive/common/LogUtils.java b/common/src/java/org/apache/hadoop/hive/common/LogUtils.java index afe6607298a9..806ed9be66cf 100644 --- a/common/src/java/org/apache/hadoop/hive/common/LogUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/LogUtils.java @@ -117,11 +117,11 @@ public static String initHiveLog4jCommon(HiveConf conf, ConfVars confVarName) // property specified file found in local file system // use the specified file if (confVarName == HiveConf.ConfVars.HIVE_EXEC_LOG4J_FILE) { - String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID); if(queryId == null || (queryId = queryId.trim()).isEmpty()) { queryId = "unknown-" + System.currentTimeMillis(); } - System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), queryId); + System.setProperty(HiveConf.ConfVars.HIVE_QUERY_ID.toString(), queryId); } final boolean async = checkAndSetAsyncLogging(conf); // required for MDC based routing appender so that child threads can inherit the MDC context @@ -157,8 +157,8 @@ private static String initHiveLog4jDefault( if (hive_l4j == null) { hive_l4j = LogUtils.class.getClassLoader().getResource(HIVE_L4J); } - System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), - HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID)); + System.setProperty(HiveConf.ConfVars.HIVE_QUERY_ID.toString(), + HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID)); break; case HIVE_LOG4J_FILE: hive_l4j = LogUtils.class.getClassLoader().getResource(HIVE_L4J); @@ -216,8 +216,8 @@ public static String maskIfPassword(String key, String value) { */ public static void registerLoggingContext(Configuration conf) { if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED)) { - MDC.put(SESSIONID_LOG_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVESESSIONID)); - MDC.put(QUERYID_LOG_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID)); + MDC.put(SESSIONID_LOG_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SESSION_ID)); + MDC.put(QUERYID_LOG_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID)); MDC.put(OPERATIONLOG_LEVEL_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL)); MDC.put(OPERATIONLOG_LOCATION_KEY, HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION)); l4j.info("Thread context registration is done."); diff --git 
a/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java b/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java index d7f4b146ed52..f4f7a9cb051f 100644 --- a/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java +++ b/common/src/java/org/apache/hadoop/hive/common/ServerUtils.java @@ -38,7 +38,7 @@ public class ServerUtils { public static void cleanUpScratchDir(HiveConf hiveConf) { if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_START_CLEANUP_SCRATCHDIR)) { - String hiveScratchDir = hiveConf.get(HiveConf.ConfVars.SCRATCHDIR.varname); + String hiveScratchDir = hiveConf.get(HiveConf.ConfVars.SCRATCH_DIR.varname); try { Path jobScratchDir = new Path(hiveScratchDir); LOG.info("Cleaning scratchDir : " + hiveScratchDir); diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index ad807386f360..a1df4493faa8 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -156,7 +156,7 @@ public static ResultFileFormat from(String value) { } public ResultFileFormat getResultFileFormat() { - return ResultFileFormat.from(this.getVar(ConfVars.HIVEQUERYRESULTFILEFORMAT)); + return ResultFileFormat.from(this.getVar(ConfVars.HIVE_QUERY_RESULT_FILEFORMAT)); } public interface EncoderDecoder { @@ -265,21 +265,21 @@ private static URL checkConfigFile(File f) { * be recreated so that the change will take effect. */ public static final HiveConf.ConfVars[] metaVars = { - HiveConf.ConfVars.METASTOREWAREHOUSE, - HiveConf.ConfVars.REPLDIR, - HiveConf.ConfVars.METASTOREURIS, - HiveConf.ConfVars.METASTORESELECTION, + HiveConf.ConfVars.METASTORE_WAREHOUSE, + HiveConf.ConfVars.REPL_DIR, + HiveConf.ConfVars.METASTORE_URIS, + HiveConf.ConfVars.METASTORE_SELECTION, HiveConf.ConfVars.METASTORE_SERVER_PORT, - HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, - HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, + HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, + HiveConf.ConfVars.METASTORE_THRIFT_FAILURE_RETRIES, HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME, - HiveConf.ConfVars.METASTOREPWD, - HiveConf.ConfVars.METASTORECONNECTURLHOOK, - HiveConf.ConfVars.METASTORECONNECTURLKEY, - HiveConf.ConfVars.METASTORESERVERMINTHREADS, - HiveConf.ConfVars.METASTORESERVERMAXTHREADS, + HiveConf.ConfVars.METASTORE_PWD, + HiveConf.ConfVars.METASTORE_CONNECT_URL_HOOK, + HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY, + HiveConf.ConfVars.METASTORE_SERVER_MIN_THREADS, + HiveConf.ConfVars.METASTORE_SERVER_MAX_THREADS, HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE, HiveConf.ConfVars.METASTORE_INT_ORIGINAL, HiveConf.ConfVars.METASTORE_INT_ARCHIVED, @@ -315,9 +315,9 @@ private static URL checkConfigFile(File f) { HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX, HiveConf.ConfVars.METASTORE_INIT_HOOKS, HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS, - HiveConf.ConfVars.HMSHANDLERATTEMPTS, - HiveConf.ConfVars.HMSHANDLERINTERVAL, - HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF, + HiveConf.ConfVars.HMS_HANDLER_ATTEMPTS, + HiveConf.ConfVars.HMS_HANDLER_INTERVAL, + HiveConf.ConfVars.HMS_HANDLER_FORCE_RELOAD_CONF, HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN, HiveConf.ConfVars.METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS, HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES, @@ -376,16 +376,16 @@ private static URL 
checkConfigFile(File f) { * for different databases. */ public static final HiveConf.ConfVars[] dbVars = { - HiveConf.ConfVars.HADOOPBIN, - HiveConf.ConfVars.METASTOREWAREHOUSE, - HiveConf.ConfVars.SCRATCHDIR + HiveConf.ConfVars.HADOOP_BIN, + HiveConf.ConfVars.METASTORE_WAREHOUSE, + HiveConf.ConfVars.SCRATCH_DIR }; /** * encoded parameter values are ;-) encoded. Use decoder to get ;-) decoded string */ static final HiveConf.ConfVars[] ENCODED_CONF = { - ConfVars.HIVEQUERYSTRING + ConfVars.HIVE_QUERY_STRING }; /** @@ -489,31 +489,31 @@ public static enum ConfVars { // QL execution stuff DFS_XATTR_ONLY_SUPPORTED_ON_RESERVED_NAMESPACE("dfs.xattr.supported.only.on.reserved.namespace", false, "DFS supports xattr only on Reserved Name space (/.reserved/raw)"), - SCRIPTWRAPPER("hive.exec.script.wrapper", null, ""), + SCRIPT_WRAPPER("hive.exec.script.wrapper", null, ""), PLAN("hive.exec.plan", "", ""), - STAGINGDIR("hive.exec.stagingdir", ".hive-staging", + STAGING_DIR("hive.exec.stagingdir", ".hive-staging", "Directory name that will be created inside table locations in order to support HDFS encryption. " + "This is replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. " + "In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans."), - SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive", + SCRATCH_DIR("hive.exec.scratchdir", "/tmp/hive", "HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. " + "For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/ is created, " + "with ${hive.scratch.dir.permission}."), - REPLDIR("hive.repl.rootdir","/user/${system:user.name}/repl/", + REPL_DIR("hive.repl.rootdir","/user/${system:user.name}/repl/", "HDFS root dir for all replication dumps."), - REPLCMENABLED("hive.repl.cm.enabled", false, + REPL_CM_ENABLED("hive.repl.cm.enabled", false, "Turn on ChangeManager, so delete files will go to cmrootdir."), - REPLCMDIR("hive.repl.cmrootdir","/user/${system:user.name}/cmroot/", + REPL_CM_DIR("hive.repl.cmrootdir","/user/${system:user.name}/cmroot/", "Root dir for ChangeManager, used for deleted files."), - REPLCMRETIAN("hive.repl.cm.retain","10d", + REPL_CM_RETAIN("hive.repl.cm.retain","10d", new TimeValidator(TimeUnit.DAYS), "Time to retain removed files in cmrootdir."), - REPLCMENCRYPTEDDIR("hive.repl.cm.encryptionzone.rootdir", ".cmroot", + REPL_CM_ENCRYPTED_DIR("hive.repl.cm.encryptionzone.rootdir", ".cmroot", "Root dir for ChangeManager if encryption zones are enabled, used for deleted files."), - REPLCMFALLBACKNONENCRYPTEDDIR("hive.repl.cm.nonencryptionzone.rootdir", + REPL_CM_FALLBACK_NONENCRYPTED_DIR("hive.repl.cm.nonencryptionzone.rootdir", "", "Root dir for ChangeManager for non encrypted paths if hive.repl.cmrootdir is encrypted."), - REPLCMINTERVAL("hive.repl.cm.interval","3600s", + REPL_CM_INTERVAL("hive.repl.cm.interval","3600s", new TimeValidator(TimeUnit.SECONDS), "Interval for cmroot cleanup thread."), REPL_HA_DATAPATH_REPLACE_REMOTE_NAMESERVICE("hive.repl.ha.datapath.replace.remote.nameservice", false, @@ -694,93 +694,93 @@ public static enum ConfVars { REPL_STATS_TOP_EVENTS_COUNTS("hive.repl.stats.events.count", 5, "Number of topmost expensive events that needs to be maintained per event type for the replication statistics." 
+ " Maximum permissible limit is 10."), - LOCALSCRATCHDIR("hive.exec.local.scratchdir", + LOCAL_SCRATCH_DIR("hive.exec.local.scratchdir", "${system:java.io.tmpdir}" + File.separator + "${system:user.name}", "Local scratch space for Hive jobs"), DOWNLOADED_RESOURCES_DIR("hive.downloaded.resources.dir", "${system:java.io.tmpdir}" + File.separator + "${hive.session.id}_resources", "Temporary local directory for added resources in the remote file system."), - SCRATCHDIRPERMISSION("hive.scratch.dir.permission", "700", + SCRATCH_DIR_PERMISSION("hive.scratch.dir.permission", "700", "The permission for the user specific scratch directories that get created."), - SUBMITVIACHILD("hive.exec.submitviachild", false, ""), - SUBMITLOCALTASKVIACHILD("hive.exec.submit.local.task.via.child", true, + SUBMIT_VIA_CHILD("hive.exec.submitviachild", false, ""), + SUBMIT_LOCAL_TASK_VIA_CHILD("hive.exec.submit.local.task.via.child", true, "Determines whether local tasks (typically mapjoin hashtable generation phase) runs in \n" + "separate JVM (true recommended) or not. \n" + "Avoids the overhead of spawning new JVM, but can lead to out-of-memory issues."), - SCRIPTERRORLIMIT("hive.exec.script.maxerrsize", 100000, + SCRIPT_ERROR_LIMIT("hive.exec.script.maxerrsize", 100000, "Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). \n" + "This prevents runaway scripts from filling logs partitions to capacity"), - ALLOWPARTIALCONSUMP("hive.exec.script.allow.partial.consumption", false, + ALLOW_PARTIAL_CONSUMP("hive.exec.script.allow.partial.consumption", false, "When enabled, this option allows a user script to exit successfully without consuming \n" + "all the data from the standard input."), - STREAMREPORTERPERFIX("stream.stderr.reporter.prefix", "reporter:", + STREAM_REPORTER_PREFIX("stream.stderr.reporter.prefix", "reporter:", "Streaming jobs that log to standard error with this prefix can log counter or status information."), - STREAMREPORTERENABLED("stream.stderr.reporter.enabled", true, + STREAM_REPORTER_ENABLED("stream.stderr.reporter.enabled", true, "Enable consumption of status and counter messages for streaming jobs."), - COMPRESSRESULT("hive.exec.compress.output", false, + COMPRESS_RESULT("hive.exec.compress.output", false, "This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) is compressed. \n" + "The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"), - COMPRESSINTERMEDIATE("hive.exec.compress.intermediate", false, + COMPRESS_INTERMEDIATE("hive.exec.compress.intermediate", false, "This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed. 
\n" + "The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"), - COMPRESSINTERMEDIATECODEC("hive.intermediate.compression.codec", "", ""), - COMPRESSINTERMEDIATETYPE("hive.intermediate.compression.type", "", ""), - BYTESPERREDUCER("hive.exec.reducers.bytes.per.reducer", (long) (256 * 1000 * 1000), + COMPRESS_INTERMEDIATE_CODEC("hive.intermediate.compression.codec", "", ""), + COMPRESS_INTERMEDIATE_TYPE("hive.intermediate.compression.type", "", ""), + BYTES_PER_REDUCER("hive.exec.reducers.bytes.per.reducer", (long) (256 * 1000 * 1000), "size per reducer.The default is 256Mb, i.e if the input size is 1G, it will use 4 reducers."), - MAXREDUCERS("hive.exec.reducers.max", 1009, + MAX_REDUCERS("hive.exec.reducers.max", 1009, "max number of reducers will be used. If the one specified in the configuration parameter mapred.reduce.tasks is\n" + "negative, Hive will use this one as the max number of reducers when automatically determine number of reducers."), - PREEXECHOOKS("hive.exec.pre.hooks", "", + PRE_EXEC_HOOKS("hive.exec.pre.hooks", "", "Comma-separated list of pre-execution hooks to be invoked for each statement. \n" + "A pre-execution hook is specified as the name of a Java class which implements the \n" + "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."), - POSTEXECHOOKS("hive.exec.post.hooks", "", + POST_EXEC_HOOKS("hive.exec.post.hooks", "", "Comma-separated list of post-execution hooks to be invoked for each statement. \n" + "A post-execution hook is specified as the name of a Java class which implements the \n" + "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."), - ONFAILUREHOOKS("hive.exec.failure.hooks", "", + ON_FAILURE_HOOKS("hive.exec.failure.hooks", "", "Comma-separated list of on-failure hooks to be invoked for each statement. \n" + "An on-failure hook is specified as the name of Java class which implements the \n" + "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."), - QUERYREDACTORHOOKS("hive.exec.query.redactor.hooks", "", + QUERY_REDACTOR_HOOKS("hive.exec.query.redactor.hooks", "", "Comma-separated list of hooks to be invoked for each query which can \n" + "transform the query before it's placed in the job.xml file. Must be a Java class which \n" + "extends from the org.apache.hadoop.hive.ql.hooks.Redactor abstract class."), - CLIENTSTATSPUBLISHERS("hive.client.stats.publishers", "", + CLIENT_STATS_PUBLISHERS("hive.client.stats.publishers", "", "Comma-separated list of statistics publishers to be invoked on counters on each job. \n" + "A client stats publisher is specified as the name of a Java class which implements the \n" + "org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface."), - BASICSTATSTASKSMAXTHREADSFACTOR("hive.basic.stats.max.threads.factor", 2, "Determines the maximum number of " + BASIC_STATS_TASKS_MAX_THREADS_FACTOR("hive.basic.stats.max.threads.factor", 2, "Determines the maximum number of " + "threads that can be used for collection of file level statistics. If the value configured is x, then the " + "maximum number of threads that can be used is x multiplied by the number of available processors. 
A value" + " of less than 1, makes stats collection sequential."), - EXECPARALLEL("hive.exec.parallel", false, "Whether to execute jobs in parallel"), - EXECPARALLETHREADNUMBER("hive.exec.parallel.thread.number", 8, + EXEC_PARALLEL("hive.exec.parallel", false, "Whether to execute jobs in parallel"), + EXEC_PARALLEL_THREAD_NUMBER("hive.exec.parallel.thread.number", 8, "How many jobs at most can be executed in parallel"), @Deprecated - HIVESPECULATIVEEXECREDUCERS("hive.mapred.reduce.tasks.speculative.execution", false, + HIVE_SPECULATIVE_EXEC_REDUCERS("hive.mapred.reduce.tasks.speculative.execution", false, "(Deprecated) Whether speculative execution for reducers should be turned on. "), - HIVECOUNTERSPULLINTERVAL("hive.exec.counters.pull.interval", 1000L, + HIVE_COUNTERS_PULL_INTERVAL("hive.exec.counters.pull.interval", 1000L, "The interval with which to poll the JobTracker for the counters the running job. \n" + "The smaller it is the more load there will be on the jobtracker, the higher it is the less granular the caught will be."), - DYNAMICPARTITIONING("hive.exec.dynamic.partition", true, + DYNAMIC_PARTITIONING("hive.exec.dynamic.partition", true, "Whether or not to allow dynamic partitions in DML/DDL."), - DYNAMICPARTITIONINGMODE("hive.exec.dynamic.partition.mode", "nonstrict", + DYNAMIC_PARTITIONING_MODE("hive.exec.dynamic.partition.mode", "nonstrict", new StringSet("strict", "nonstrict"), "In strict mode, the user must specify at least one static partition\n" + "in case the user accidentally overwrites all partitions.\n" + "In nonstrict mode all partitions are allowed to be dynamic."), - DYNAMICPARTITIONMAXPARTS("hive.exec.max.dynamic.partitions", 1000, + DYNAMIC_PARTITION_MAX_PARTS("hive.exec.max.dynamic.partitions", 1000, "Maximum number of dynamic partitions allowed to be created in total."), - DYNAMICPARTITIONMAXPARTSPERNODE("hive.exec.max.dynamic.partitions.pernode", 100, + DYNAMIC_PARTITION_MAX_PARTS_PER_NODE("hive.exec.max.dynamic.partitions.pernode", 100, "Maximum number of dynamic partitions allowed to be created in each mapper/reducer node."), - DYNAMICPARTITIONCONVERT("hive.exec.dynamic.partition.type.conversion", true, + DYNAMIC_PARTITION_CONVERT("hive.exec.dynamic.partition.type.conversion", true, "Whether to check and cast a dynamic partition column before creating the partition " + "directory. For example, if partition p is type int and we insert string '001', then if " + "this value is true, directory p=1 will be created; if false, p=001"), - MAXCREATEDFILES("hive.exec.max.created.files", 100000L, + MAX_CREATED_FILES("hive.exec.max.created.files", 100000L, "Maximum number of HDFS files created by all mappers/reducers in a MapReduce job."), - DEFAULTPARTITIONNAME("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__", + DEFAULT_PARTITION_NAME("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__", "The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. \n" + "This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). 
\n" + "The user has to be aware that the dynamic partition value should not contain this value to avoid confusions."), @@ -841,17 +841,17 @@ public static enum ConfVars { HIVE_IN_REPL_TEST_FILES_SORTED("hive.in.repl.test.files.sorted", false, "internal usage only, set to true if the file listing is required in sorted order during bootstrap load", true), - LOCALMODEAUTO("hive.exec.mode.local.auto", false, + LOCAL_MODE_AUTO("hive.exec.mode.local.auto", false, "Let Hive determine whether to run in local mode automatically"), - LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L, + LOCAL_MODE_MAX_BYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L, "When hive.exec.mode.local.auto is true, input bytes should less than this for local mode."), - LOCALMODEMAXINPUTFILES("hive.exec.mode.local.auto.input.files.max", 4, + LOCAL_MODE_MAX_INPUT_FILES("hive.exec.mode.local.auto.input.files.max", 4, "When hive.exec.mode.local.auto is true, the number of tasks should less than this for local mode."), DROP_IGNORES_NON_EXISTENT("hive.exec.drop.ignorenonexistent", true, "Do not report an error if DROP TABLE/VIEW/Index/Function specifies a nonexistent table/view/function"), - HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true, "Ignore the mapjoin hint"), + HIVE_IGNORE_MAPJOIN_HINT("hive.ignore.mapjoin.hint", true, "Ignore the mapjoin hint"), HIVE_CONF_LOCKED_LIST("hive.conf.locked.list", "", "Comma separated " + "list of configuration options which are locked and can not be changed at runtime. Warning is logged and the " + @@ -886,28 +886,28 @@ public static enum ConfVars { // Properties with null values are ignored and exist only for the purpose of giving us // a symbolic name to reference in the Hive source code. Properties with non-null // values will override any values set in the underlying Hadoop configuration. - HADOOPBIN("hadoop.bin.path", findHadoopBinary(), "", true), - YARNBIN("yarn.bin.path", findYarnBinary(), "", true), - MAPREDBIN("mapred.bin.path", findMapRedBinary(), "", true), + HADOOP_BIN("hadoop.bin.path", findHadoopBinary(), "", true), + YARN_BIN("yarn.bin.path", findYarnBinary(), "", true), + MAPRED_BIN("mapred.bin.path", findMapRedBinary(), "", true), HIVE_FS_HAR_IMPL("fs.har.impl", "org.apache.hadoop.hive.shims.HiveHarFileSystem", "The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20"), - MAPREDMAXSPLITSIZE(FileInputFormat.SPLIT_MAXSIZE, 256000000L, "", true), - MAPREDMINSPLITSIZE(FileInputFormat.SPLIT_MINSIZE, 1L, "", true), - MAPREDMINSPLITSIZEPERNODE(CombineFileInputFormat.SPLIT_MINSIZE_PERNODE, 1L, "", true), - MAPREDMINSPLITSIZEPERRACK(CombineFileInputFormat.SPLIT_MINSIZE_PERRACK, 1L, "", true), + MAPRED_MAX_SPLIT_SIZE(FileInputFormat.SPLIT_MAXSIZE, 256000000L, "", true), + MAPRED_MIN_SPLIT_SIZE(FileInputFormat.SPLIT_MINSIZE, 1L, "", true), + MAPRED_MIN_SPLIT_SIZE_PER_NODE(CombineFileInputFormat.SPLIT_MINSIZE_PERNODE, 1L, "", true), + MAPRED_MIN_SPLIT_SIZE_PER_RACK(CombineFileInputFormat.SPLIT_MINSIZE_PERRACK, 1L, "", true), // The number of reduce tasks per job. Hadoop sets this value to 1 by default // By setting this property to -1, Hive will automatically determine the correct // number of reducers. - HADOOPNUMREDUCERS("mapreduce.job.reduces", -1, "", true), + HADOOP_NUM_REDUCERS("mapreduce.job.reduces", -1, "", true), // Metastore stuff. Be sure to update HiveConf.metaVars when you add something here! 
- METASTOREDBTYPE("hive.metastore.db.type", "DERBY", new StringSet("DERBY", "ORACLE", "MYSQL", "MSSQL", "POSTGRES"), + METASTORE_DB_TYPE("hive.metastore.db.type", "DERBY", new StringSet("DERBY", "ORACLE", "MYSQL", "MSSQL", "POSTGRES"), "Type of database used by the metastore. Information schema & JDBCStorageHandler depend on it."), /** * @deprecated Use MetastoreConf.WAREHOUSE */ @Deprecated - METASTOREWAREHOUSE("hive.metastore.warehouse.dir", "/user/hive/warehouse", + METASTORE_WAREHOUSE("hive.metastore.warehouse.dir", "/user/hive/warehouse", "location of default database for the warehouse"), HIVE_METASTORE_WAREHOUSE_EXTERNAL("hive.metastore.warehouse.external.dir", null, @@ -918,14 +918,14 @@ public static enum ConfVars { * @deprecated Use MetastoreConf.THRIFT_URIS */ @Deprecated - METASTOREURIS("hive.metastore.uris", "", + METASTORE_URIS("hive.metastore.uris", "", "Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore."), /** * @deprecated Use MetastoreConf.THRIFT_URI_SELECTION */ @Deprecated - METASTORESELECTION("hive.metastore.uri.selection", "RANDOM", + METASTORE_SELECTION("hive.metastore.uri.selection", "RANDOM", new StringSet("SEQUENTIAL", "RANDOM"), "Determines the selection mechanism used by metastore client to connect to remote " + "metastore. SEQUENTIAL implies that the first valid metastore from the URIs specified " + @@ -976,13 +976,13 @@ public static enum ConfVars { * @deprecated Use MetastoreConf.THRIFT_CONNECTION_RETRIES */ @Deprecated - METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3, + METASTORE_THRIFT_CONNECTION_RETRIES("hive.metastore.connect.retries", 3, "Number of retries while opening a connection to metastore"), /** * @deprecated Use MetastoreConf.THRIFT_FAILURE_RETRIES */ @Deprecated - METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1, + METASTORE_THRIFT_FAILURE_RETRIES("hive.metastore.failure.retries", 1, "Number of retries upon failure of Thrift metastore calls"), /** * @deprecated Use MetastoreConf.SERVER_PORT @@ -1016,25 +1016,25 @@ public static enum ConfVars { * @deprecated Use MetastoreConf.PWD */ @Deprecated - METASTOREPWD("javax.jdo.option.ConnectionPassword", "mine", + METASTORE_PWD("javax.jdo.option.ConnectionPassword", "mine", "password to use against metastore database"), /** * @deprecated Use MetastoreConf.CONNECT_URL_HOOK */ @Deprecated - METASTORECONNECTURLHOOK("hive.metastore.ds.connection.url.hook", "", + METASTORE_CONNECT_URL_HOOK("hive.metastore.ds.connection.url.hook", "", "Name of the hook to use for retrieving the JDO connection URL. 
If empty, the value in javax.jdo.option.ConnectionURL is used"), /** * @deprecated Use MetastoreConf.MULTITHREADED */ @Deprecated - METASTOREMULTITHREADED("javax.jdo.option.Multithreaded", true, + METASTORE_MULTI_THREADED("javax.jdo.option.Multithreaded", true, "Set this to true if multiple threads access metastore through JDO concurrently."), /** * @deprecated Use MetastoreConf.CONNECT_URL_KEY */ @Deprecated - METASTORECONNECTURLKEY("javax.jdo.option.ConnectionURL", + METASTORE_CONNECT_URL_KEY("javax.jdo.option.ConnectionURL", "jdbc:derby:;databaseName=metastore_db;create=true", "JDBC connect string for a JDBC metastore.\n" + "To use SSL to encrypt/authenticate the connection, provide database-specific SSL flag in the connection URL.\n" + @@ -1050,19 +1050,19 @@ public static enum ConfVars { * @deprecated Use MetastoreConf.HMS_HANDLER_ATTEMPTS */ @Deprecated - HMSHANDLERATTEMPTS("hive.hmshandler.retry.attempts", 10, + HMS_HANDLER_ATTEMPTS("hive.hmshandler.retry.attempts", 10, "The number of times to retry a HMSHandler call if there were a connection error."), /** * @deprecated Use MetastoreConf.HMS_HANDLER_INTERVAL */ @Deprecated - HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", "2000ms", + HMS_HANDLER_INTERVAL("hive.hmshandler.retry.interval", "2000ms", new TimeValidator(TimeUnit.MILLISECONDS), "The time between HMSHandler retry attempts on failure."), /** * @deprecated Use MetastoreConf.HMS_HANDLER_FORCE_RELOAD_CONF */ @Deprecated - HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false, + HMS_HANDLER_FORCE_RELOAD_CONF("hive.hmshandler.force.reload.conf", false, "Whether to force reloading of the HMSHandler configuration (including\n" + "the connection URL, before the next metastore query that accesses the\n" + "datastore. Once reloaded, this value is reset to false. Used for\n" + @@ -1071,19 +1071,19 @@ public static enum ConfVars { * @deprecated Use MetastoreConf.SERVER_MAX_MESSAGE_SIZE */ @Deprecated - METASTORESERVERMAXMESSAGESIZE("hive.metastore.server.max.message.size", 100*1024*1024L, + METASTORE_SERVER_MAX_MESSAGE_SIZE("hive.metastore.server.max.message.size", 100*1024*1024L, "Maximum message size in bytes a HMS will accept."), /** * @deprecated Use MetastoreConf.SERVER_MIN_THREADS */ @Deprecated - METASTORESERVERMINTHREADS("hive.metastore.server.min.threads", 200, + METASTORE_SERVER_MIN_THREADS("hive.metastore.server.min.threads", 200, "Minimum number of worker threads in the Thrift server's pool."), /** * @deprecated Use MetastoreConf.SERVER_MAX_THREADS */ @Deprecated - METASTORESERVERMAXTHREADS("hive.metastore.server.max.threads", 1000, + METASTORE_SERVER_MAX_THREADS("hive.metastore.server.max.threads", 1000, "Maximum number of worker threads in the Thrift server's pool."), /** * @deprecated Use MetastoreConf.TCP_KEEP_ALIVE @@ -1301,9 +1301,10 @@ public static enum ConfVars { */ @Deprecated METASTORE_BATCH_RETRIEVE_MAX("hive.metastore.batch.retrieve.max", 300, + new RangeValidator(1, null), "Maximum number of objects (tables/partitions) can be retrieved from metastore in one batch. \n" + "The higher the number, the less the number of round trips is needed to the Hive metastore server, \n" + - "but it may also cause higher memory requirement at the client side."), + "but it may also cause higher memory requirement at the client side. 
Batch value should be greater than 0."), /** * @deprecated Use MetastoreConf.BATCH_RETRIEVE_OBJECTS_MAX */ @@ -1502,7 +1503,7 @@ public static enum ConfVars { @Deprecated METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES( "hive.metastore.disallow.incompatible.col.type.changes", true, - "If true (default is false), ALTER TABLE operations which change the type of a\n" + + "If true (default is true), ALTER TABLE operations which change the type of a\n" + "column (say STRING) to an incompatible type (say MAP) are disallowed.\n" + "RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\n" + "datatypes can be converted from string to any type. The map is also serialized as\n" + @@ -1522,7 +1523,7 @@ public static enum ConfVars { "This limits the number of partitions that can be requested from the metastore for a given table.\n" + "The default value \"-1\" means no limit."), - NEWTABLEDEFAULTPARA("hive.table.parameters.default", "", + NEW_TABLE_DEFAULT_PARA("hive.table.parameters.default", "", "Default property values for newly created tables"), /** * @deprecated With HIVE-25813 table properties of source tables will not be copied over to dest table. @@ -1728,10 +1729,10 @@ public static enum ConfVars { "alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data."), // CLI - CLIIGNOREERRORS("hive.cli.errors.ignore", false, ""), - CLIPRINTCURRENTDB("hive.cli.print.current.db", false, + CLI_IGNORE_ERRORS("hive.cli.errors.ignore", false, ""), + CLI_PRINT_CURRENT_DB("hive.cli.print.current.db", false, "Whether to include the current database in the Hive prompt."), - CLIPROMPT("hive.cli.prompt", "hive", + CLI_PROMPT("hive.cli.prompt", "hive", "Command line prompt configuration value. Other hiveconf can be used in this configuration value. \n" + "Variable substitution will only be invoked at the Hive CLI startup."), /** @@ -1743,9 +1744,9 @@ public static enum ConfVars { // Things we log in the jobconf // session identifier - HIVESESSIONID("hive.session.id", "", ""), + HIVE_SESSION_ID("hive.session.id", "", ""), // whether session is running in silent mode or not - HIVESESSIONSILENT("hive.session.silent", false, ""), + HIVE_SESSION_SILENT("hive.session.silent", false, ""), HIVE_LOCAL_TIME_ZONE("hive.local.time.zone", "LOCAL", "Sets the time-zone for displaying and interpreting time stamps. If this property value is set to\n" + @@ -1756,35 +1757,35 @@ public static enum ConfVars { HIVE_SESSION_HISTORY_ENABLED("hive.session.history.enabled", false, "Whether to log Hive query, query plan, runtime statistics etc."), - HIVEQUERYSTRING("hive.query.string", "", + HIVE_QUERY_STRING("hive.query.string", "", "Query being executed (might be multiple per a session)"), - HIVEQUERYID("hive.query.id", "", + HIVE_QUERY_ID("hive.query.id", "", "ID for query being executed (might be multiple per a session)"), - HIVEQUERYTAG("hive.query.tag", null, "Tag for the queries in the session. User can kill the queries with the tag " + + HIVE_QUERY_TAG("hive.query.tag", null, "Tag for the queries in the session. User can kill the queries with the tag " + "in another session. Currently there is no tag duplication check, user need to make sure his tag is unique. 
" + "Also 'kill query' needs to be issued to all HiveServer2 instances to proper kill the queries"), - HIVEJOBNAMELENGTH("hive.jobname.length", 50, "max jobname length"), + HIVE_JOBNAME_LENGTH("hive.jobname.length", 50, "max jobname length"), // hive jar - HIVEJAR("hive.jar.path", "", + HIVE_JAR("hive.jar.path", "", "The location of hive_cli.jar that is used when submitting jobs in a separate jvm."), - HIVEAUXJARS("hive.aux.jars.path", "", + HIVE_AUX_JARS("hive.aux.jars.path", "", "The location of the plugin jars that contain implementations of user defined functions and serdes."), // reloadable jars - HIVERELOADABLEJARS("hive.reloadable.aux.jars.path", "", + HIVE_RELOADABLE_JARS("hive.reloadable.aux.jars.path", "", "The locations of the plugin jars, which can be a comma-separated folders or jars. Jars can be renewed\n" + "by executing reload command. And these jars can be " + "used as the auxiliary classes like creating a UDF or SerDe."), // hive added files and jars - HIVEADDEDFILES("hive.added.files.path", "", "This an internal parameter."), - HIVEADDEDJARS("hive.added.jars.path", "", "This an internal parameter."), - HIVEADDEDARCHIVES("hive.added.archives.path", "", "This an internal parameter."), - HIVEADDFILESUSEHDFSLOCATION("hive.resource.use.hdfs.location", true, "Reference HDFS based files/jars directly instead of " + HIVE_ADDED_FILES("hive.added.files.path", "", "This an internal parameter."), + HIVE_ADDED_JARS("hive.added.jars.path", "", "This an internal parameter."), + HIVE_ADDED_ARCHIVES("hive.added.archives.path", "", "This an internal parameter."), + HIVE_ADD_FILES_USE_HDFS_LOCATION("hive.resource.use.hdfs.location", true, "Reference HDFS based files/jars directly instead of " + "copy to session based HDFS scratch directory, to make distributed cache more useful."), HIVE_CURRENT_DATABASE("hive.current.database", "", "Database name used by current session. Internal usage only.", true), @@ -1794,17 +1795,17 @@ public static enum ConfVars { new TimeValidator(TimeUnit.SECONDS), "How long to run autoprogressor for the script/UDTF operators.\n" + "Set to 0 for forever."), - HIVESCRIPTAUTOPROGRESS("hive.script.auto.progress", false, + HIVE_SCRIPT_AUTO_PROGRESS("hive.script.auto.progress", false, "Whether Hive Transform/Map/Reduce Clause should automatically send progress information to TaskTracker \n" + "to avoid the task getting killed because of inactivity. Hive sends progress information when the script is \n" + "outputting to stderr. 
This option removes the need of periodically producing stderr messages, \n" + "but users should be cautious because this may prevent infinite loops in the scripts to be killed by TaskTracker."), - HIVESCRIPTIDENVVAR("hive.script.operator.id.env.var", "HIVE_SCRIPT_OPERATOR_ID", + HIVE_SCRIPT_ID_ENV_VAR("hive.script.operator.id.env.var", "HIVE_SCRIPT_OPERATOR_ID", "Name of the environment variable that holds the unique script operator ID in the user's \n" + "transform function (the custom mapper/reducer that the user has specified in the query)"), - HIVESCRIPTTRUNCATEENV("hive.script.operator.truncate.env", false, + HIVE_SCRIPT_TRUNCATE_ENV("hive.script.operator.truncate.env", false, "Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits)"), - HIVESCRIPT_ENV_BLACKLIST("hive.script.operator.env.blacklist", + HIVE_SCRIPT_ENV_BLACKLIST("hive.script.operator.env.blacklist", "hive.txn.valid.txns,hive.txn.tables.valid.writeids,hive.txn.valid.writeids,hive.script.operator.env.blacklist,hive.repl.current.table.write.id", "Comma separated list of keys from the configuration file not to convert to environment " + "variables when invoking the script operator"), @@ -1837,16 +1838,16 @@ public static enum ConfVars { "Set the owner of files loaded using load data in managed tables."), @Deprecated - HIVEMAPREDMODE("hive.mapred.mode", null, + HIVE_MAPRED_MODE("hive.mapred.mode", null, "Deprecated; use hive.strict.checks.* settings instead."), - HIVEALIAS("hive.alias", "", ""), - HIVEMAPSIDEAGGREGATE("hive.map.aggr", true, "Whether to use map-side aggregation in Hive Group By queries"), - HIVEGROUPBYSKEW("hive.groupby.skewindata", false, "Whether there is skew in data to optimize group by queries"), + HIVE_ALIAS("hive.alias", "", ""), + HIVE_MAPSIDE_AGGREGATE("hive.map.aggr", true, "Whether to use map-side aggregation in Hive Group By queries"), + HIVE_GROUPBY_SKEW("hive.groupby.skewindata", false, "Whether there is skew in data to optimize group by queries"), HIVE_JOIN_SHORTCUT_UNMATCHED_ROWS("hive.join.shortcut.unmatched.rows", true, "Enables to shortcut processing of known filtered rows in merge joins. internal use only. may affect correctness"), - HIVEJOINEMITINTERVAL("hive.join.emit.interval", 1000, + HIVE_JOIN_EMIT_INTERVAL("hive.join.emit.interval", 1000, "How many rows in the right-most join operand Hive should buffer before emitting the join result."), - HIVEJOINCACHESIZE("hive.join.cache.size", 25000, + HIVE_JOIN_CACHE_SIZE("hive.join.cache.size", 25000, "How many rows in the joining tables (except the streaming table) should be cached in memory."), HIVE_PUSH_RESIDUAL_INNER("hive.join.inner.residual", false, "Whether to push non-equi filter predicates within inner joins. This can improve efficiency in " @@ -1962,31 +1963,31 @@ public static enum ConfVars { // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.row, // need to remove by hive .13. 
Also, do not change default (see SMB operator) - HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100, ""), + HIVE_MAPJOIN_BUCKET_CACHE_SIZE("hive.mapjoin.bucket.cache.size", 100, ""), - HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true, + HIVE_MAPJOIN_USE_OPTIMIZED_TABLE("hive.mapjoin.optimized.hashtable", true, "Whether Hive should use memory-optimized hash table for MapJoin.\n" + "Only works on Tez because memory-optimized hashtable cannot be serialized."), - HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT("hive.mapjoin.optimized.hashtable.probe.percent", + HIVE_MAPJOIN_OPTIMIZED_TABLE_PROBE_PERCENT("hive.mapjoin.optimized.hashtable.probe.percent", (float) 0.5, "Probing space percentage of the optimized hashtable"), - HIVEMAPJOINPARALELHASHTABLETHREADS("hive.mapjoin.hashtable.load.threads", 2, + HIVE_MAPJOIN_PARALEL_HASHTABLE_THREADS("hive.mapjoin.hashtable.load.threads", 2, "Number of threads used to load records from a broadcast edge in HT used for MJ"), - HIVEUSEHYBRIDGRACEHASHJOIN("hive.mapjoin.hybridgrace.hashtable", false, "Whether to use hybrid" + + HIVE_USE_HYBRIDGRACE_HASHJOIN("hive.mapjoin.hybridgrace.hashtable", false, "Whether to use hybrid" + "grace hash join as the join method for mapjoin. Tez only."), - HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ("hive.mapjoin.hybridgrace.memcheckfrequency", 1024, "For " + + HIVE_HYBRIDGRACE_HASHJOIN_MEMCHECK_FREQ("hive.mapjoin.hybridgrace.memcheckfrequency", 1024, "For " + "hybrid grace hash join, how often (how many rows apart) we check if memory is full. " + "This number should be power of 2."), - HIVEHYBRIDGRACEHASHJOINMINWBSIZE("hive.mapjoin.hybridgrace.minwbsize", 524288, "For hybrid grace" + + HIVE_HYBRIDGRACE_HASHJOIN_MIN_WB_SIZE("hive.mapjoin.hybridgrace.minwbsize", 524288, "For hybrid grace" + "Hash join, the minimum write buffer size used by optimized hashtable. Default is 512 KB."), - HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS("hive.mapjoin.hybridgrace.minnumpartitions", 16, "For" + + HIVE_HYBRIDGRACE_HASHJOIN_MIN_NUM_PARTITIONS("hive.mapjoin.hybridgrace.minnumpartitions", 16, "For" + "Hybrid grace hash join, the minimum number of partitions to create."), - HIVEHASHTABLEWBSIZE("hive.mapjoin.optimized.hashtable.wbsize", 8 * 1024 * 1024, + HIVE_HASHTABLE_WB_SIZE("hive.mapjoin.optimized.hashtable.wbsize", 8 * 1024 * 1024, "Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to\n" + "store data. This is one buffer size. HT may be slightly faster if this is larger, but for small\n" + "joins unnecessary memory will be allocated and then trimmed."), - HIVEHYBRIDGRACEHASHJOINBLOOMFILTER("hive.mapjoin.hybridgrace.bloomfilter", true, "Whether to " + + HIVE_HYBRIDGRACE_HASHJOIN_BLOOMFILTER("hive.mapjoin.hybridgrace.bloomfilter", true, "Whether to " + "use BloomFilter in Hybrid grace hash join to minimize unnecessary spilling."), - HIVEMAPJOINFULLOUTER("hive.mapjoin.full.outer", true, + HIVE_MAPJOIN_FULL_OUTER("hive.mapjoin.full.outer", true, "Whether to use MapJoin for FULL OUTER JOINs."), HIVE_TEST_MAPJOINFULLOUTER_OVERRIDE( "hive.test.mapjoin.full.outer.override", @@ -1995,33 +1996,33 @@ public static enum ConfVars { "setting. 
Using enable will force it on and disable will force it off.\n" + "The default none is do nothing, of course", true), - HIVESMBJOINCACHEROWS("hive.smbjoin.cache.rows", 10000, + HIVE_SMBJOIN_CACHE_ROWS("hive.smbjoin.cache.rows", 10000, "How many rows with the same key value should be cached in memory per smb joined table."), - HIVEGROUPBYMAPINTERVAL("hive.groupby.mapaggr.checkinterval", 100000, + HIVE_GROUPBY_MAP_INTERVAL("hive.groupby.mapaggr.checkinterval", 100000, "Number of rows after which size of the grouping keys/aggregation classes is performed"), - HIVEMAPAGGRHASHMEMORY("hive.map.aggr.hash.percentmemory", (float) 0.5, + HIVE_MAP_AGGR_HASH_MEMORY("hive.map.aggr.hash.percentmemory", (float) 0.5, "Portion of total memory to be used by map-side group aggregation hash table"), - HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY("hive.mapjoin.followby.map.aggr.hash.percentmemory", (float) 0.3, + HIVE_MAPJOIN_FOLLOWEDBY_MAP_AGGR_HASH_MEMORY("hive.mapjoin.followby.map.aggr.hash.percentmemory", (float) 0.3, "Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join"), - HIVEMAPAGGRMEMORYTHRESHOLD("hive.map.aggr.hash.force.flush.memory.threshold", (float) 0.9, + HIVE_MAP_AGGR_MEMORY_THRESHOLD("hive.map.aggr.hash.force.flush.memory.threshold", (float) 0.9, "The max memory to be used by map-side group aggregation hash table.\n" + "If the memory usage is higher than this number, force to flush data"), - HIVEMAPAGGRHASHMINREDUCTION("hive.map.aggr.hash.min.reduction", (float) 0.99, + HIVE_MAP_AGGR_HASH_MIN_REDUCTION("hive.map.aggr.hash.min.reduction", (float) 0.99, "Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number. \n" + "Set to 1 to make sure hash aggregation is never turned off."), - HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND("hive.map.aggr.hash.min.reduction.lower.bound", (float) 0.4, + HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND("hive.map.aggr.hash.min.reduction.lower.bound", (float) 0.4, "Lower bound of Hash aggregate reduction filter. See also: hive.map.aggr.hash.min.reduction"), - HIVEMAPAGGRHASHMINREDUCTIONSTATSADJUST("hive.map.aggr.hash.min.reduction.stats", true, + HIVE_MAP_AGGR_HASH_MIN_REDUCTION_STATS_ADJUST("hive.map.aggr.hash.min.reduction.stats", true, "Whether the value for hive.map.aggr.hash.min.reduction should be set statically using stats estimates. \n" + "If this is enabled, the default value for hive.map.aggr.hash.min.reduction is only used as an upper-bound\n" + "for the value set in the map-side group by operators."), - HIVEMULTIGROUPBYSINGLEREDUCER("hive.multigroupby.singlereducer", true, + HIVE_MULTI_GROUPBY_SINGLE_REDUCER("hive.multigroupby.singlereducer", true, "Whether to optimize multi group by query to generate single M/R job plan. If the multi group by query has \n" + "common group by keys, it will be optimized to generate single M/R job."), - HIVE_MAP_GROUPBY_SORT("hive.map.groupby.sorted", true, + HIVE_MAP_GROUPBY_SORT("hive.map.groupby.sorted", false, "If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform \n" + - "the group by in the mapper by using BucketizedHiveInputFormat. The only downside to this\n" + - "is that it limits the number of mappers to the number of files."), + "the group by in the mapper by using BucketizedHiveInputFormat. This can only work if the number of files to be\n" + + "processed is exactly 1. 
The downside to this is that it limits the number of mappers to the number of files."), HIVE_DEFAULT_NULLS_LAST("hive.default.nulls.last", true, "Whether to set NULLS LAST as the default null ordering for ASC order and " + "NULLS FIRST for DESC order."), @@ -2055,14 +2056,14 @@ public static enum ConfVars { "Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."), // for hive udtf operator - HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false, + HIVE_UDTF_AUTO_PROGRESS("hive.udtf.auto.progress", false, "Whether Hive should automatically send progress information to TaskTracker \n" + "when using UDTF's to prevent the task getting killed because of inactivity. Users should be cautious \n" + "because this may prevent TaskTracker from killing tasks with infinite loops."), - HIVEDEFAULTFILEFORMAT("hive.default.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile", "ORC", "parquet"), + HIVE_DEFAULT_FILEFORMAT("hive.default.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile", "ORC", "parquet"), "Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]"), - HIVEDEFAULTMANAGEDFILEFORMAT("hive.default.fileformat.managed", "none", + HIVE_DEFAULT_MANAGED_FILEFORMAT("hive.default.fileformat.managed", "none", new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC", "parquet"), "Default file format for CREATE TABLE statement applied to managed tables only. External tables will be \n" + "created with format specified by hive.default.fileformat. Leaving this null will result in using hive.default.fileformat \n" + @@ -2071,17 +2072,17 @@ public static enum ConfVars { "Default storage handler class for CREATE TABLE statements. If this is set to a valid class, a 'CREATE TABLE ... STORED AS ... LOCATION ...' command will " + "be equivalent to 'CREATE TABLE ... STORED BY [default.storage.handler.class] LOCATION ...'. Any STORED AS clauses will be ignored, given that STORED BY and STORED AS are " + "incompatible within the same command. Users can explicitly override the default class by issuing 'CREATE TABLE ... 
STORED BY [overriding.storage.handler.class] ...'"), - HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", ResultFileFormat.SEQUENCEFILE.toString(), + HIVE_QUERY_RESULT_FILEFORMAT("hive.query.result.fileformat", ResultFileFormat.SEQUENCEFILE.toString(), new StringSet(ResultFileFormat.getValidSet()), "Default file format for storing result of the query."), - HIVECHECKFILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"), + HIVE_CHECK_FILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"), // default serde for rcfile - HIVEDEFAULTRCFILESERDE("hive.default.rcfile.serde", + HIVE_DEFAULT_RCFILE_SERDE("hive.default.rcfile.serde", "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe", "The default SerDe Hive will use for the RCFile format"), - HIVEDEFAULTSERDE("hive.default.serde", + HIVE_DEFAULT_SERDE("hive.default.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", "The default SerDe Hive will use for storage formats that do not specify a SerDe."), @@ -2089,7 +2090,7 @@ public static enum ConfVars { * @deprecated Use MetastoreConf.SERDES_USING_METASTORE_FOR_SCHEMA */ @Deprecated - SERDESUSINGMETASTOREFORSCHEMA("hive.serdes.using.metastore.for.schema", + SERDES_USING_METASTORE_FOR_SCHEMA("hive.serdes.using.metastore.for.schema", "org.apache.hadoop.hive.ql.io.orc.OrcSerde," + "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe," + "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe," + @@ -2104,9 +2105,9 @@ public static enum ConfVars { HIVE_LEGACY_SCHEMA_FOR_ALL_SERDES("hive.legacy.schema.for.all.serdes", false, "A backward compatibility setting for external metastore users that do not handle \n" + - SERDESUSINGMETASTOREFORSCHEMA.varname + " correctly. This may be removed at any time."), + SERDES_USING_METASTORE_FOR_SCHEMA.varname + " correctly. This may be removed at any time."), - HIVEHISTORYFILELOC("hive.querylog.location", + HIVE_HISTORY_FILE_LOC("hive.querylog.location", "${system:java.io.tmpdir}" + File.separator + "${system:user.name}", "Location of Hive run time structured log file"), @@ -2125,26 +2126,26 @@ public static enum ConfVars { "logged less frequently than specified.\n" + "This only has an effect if hive.querylog.enable.plan.progress is set to true."), - HIVESCRIPTSERDE("hive.script.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", + HIVE_SCRIPT_SERDE("hive.script.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", "The default SerDe for transmitting input data to and reading output data from the user scripts. "), - HIVESCRIPTRECORDREADER("hive.script.recordreader", + HIVE_SCRIPT_RECORD_READER("hive.script.recordreader", "org.apache.hadoop.hive.ql.exec.TextRecordReader", "The default record reader for reading data from the user scripts. "), - HIVESCRIPTRECORDWRITER("hive.script.recordwriter", + HIVE_SCRIPT_RECORD_WRITER("hive.script.recordwriter", "org.apache.hadoop.hive.ql.exec.TextRecordWriter", "The default record writer for writing data to the user scripts. "), - HIVESCRIPTESCAPE("hive.transform.escape.input", false, + HIVE_SCRIPT_ESCAPE("hive.transform.escape.input", false, "This adds an option to escape special chars (newlines, carriage returns and\n" + "tabs) when they are passed to the user script. 
This is useful if the Hive tables\n" + "can contain data that contains special characters."), - HIVEBINARYRECORDMAX("hive.binary.record.max.length", 1000, + HIVE_BINARY_RECORD_MAX("hive.binary.record.max.length", 1000, "Read from a binary stream and treat each hive.binary.record.max.length bytes as a record. \n" + "The last record before the end of stream can have less than hive.binary.record.max.length bytes"), - HIVEHADOOPMAXMEM("hive.mapred.local.mem", 0, "mapper/reducer memory in local mode"), + HIVE_HADOOP_MAX_MEM("hive.mapred.local.mem", 0, "mapper/reducer memory in local mode"), //small table file size - HIVESMALLTABLESFILESIZE("hive.mapjoin.smalltable.filesize", 25000000L, + HIVE_SMALL_TABLES_FILESIZE("hive.mapjoin.smalltable.filesize", 25000000L, "The threshold for the input file size of the small tables; if the file size is smaller \n" + "than this threshold, it will try to convert the common join into map join"), @@ -2168,53 +2169,53 @@ public static enum ConfVars { "If true, VectorizedOrcAcidRowBatchReader will compute min/max " + "ROW__ID for the split and only load delete events in that range.\n" ), - HIVESAMPLERANDOMNUM("hive.sample.seednumber", 0, + HIVE_SAMPLE_RANDOM_NUM("hive.sample.seednumber", 0, "A number used to percentage sampling. By changing this number, user will change the subsets of data sampled."), // test mode in hive mode - HIVETESTMODE("hive.test.mode", false, + HIVE_TEST_MODE("hive.test.mode", false, "Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename.", false), - HIVEEXIMTESTMODE("hive.exim.test.mode", false, + HIVE_EXIM_TEST_MODE("hive.exim.test.mode", false, "The subset of test mode that only enables custom path handling for ExIm.", false), - HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_", + HIVE_TEST_MODE_PREFIX("hive.test.mode.prefix", "test_", "In test mode, specifies prefixes for the output table", false), - HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32, + HIVE_TEST_MODE_SAMPLE_FREQ("hive.test.mode.samplefreq", 32, "In test mode, specifies sampling frequency for table, which is not bucketed,\n" + "For example, the following query:\n" + " INSERT OVERWRITE TABLE dest SELECT col1 from src\n" + "would be converted to\n" + " INSERT OVERWRITE TABLE test_dest\n" + " SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1))", false), - HIVETESTMODENOSAMPLE("hive.test.mode.nosamplelist", "", + HIVE_TEST_MODE_NOSAMPLE("hive.test.mode.nosamplelist", "", "In test mode, specifies comma separated table names which would not apply sampling", false), - HIVETESTMODEDUMMYSTATAGGR("hive.test.dummystats.aggregator", "", "internal variable for test", false), - HIVETESTMODEDUMMYSTATPUB("hive.test.dummystats.publisher", "", "internal variable for test", false), - HIVETESTCURRENTTIMESTAMP("hive.test.currenttimestamp", null, "current timestamp for test", false), - HIVETESTMODEROLLBACKTXN("hive.test.rollbacktxn", false, "For testing only. Will mark every ACID transaction aborted", false), - HIVETESTMODEFAILCOMPACTION("hive.test.fail.compaction", false, "For testing only. Will cause CompactorMR to fail.", false), - HIVETESTMODEFAILLOADDYNAMICPARTITION("hive.test.fail.load.dynamic.partition", false, "For testing only. Will cause loadDynamicPartition to fail.", false), - HIVETESTMODEFAILHEARTBEATER("hive.test.fail.heartbeater", false, "For testing only. 
Will cause Heartbeater to fail.", false), + HIVE_TEST_MODE_DUMMY_STAT_AGGR("hive.test.dummystats.aggregator", "", "internal variable for test", false), + HIVE_TEST_MODE_DUMMY_STAT_PUB("hive.test.dummystats.publisher", "", "internal variable for test", false), + HIVE_TEST_CURRENT_TIMESTAMP("hive.test.currenttimestamp", null, "current timestamp for test", false), + HIVE_TEST_MODE_ROLLBACK_TXN("hive.test.rollbacktxn", false, "For testing only. Will mark every ACID transaction aborted", false), + HIVE_TEST_MODE_FAIL_COMPACTION("hive.test.fail.compaction", false, "For testing only. Will cause CompactorMR to fail.", false), + HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION("hive.test.fail.load.dynamic.partition", false, "For testing only. Will cause loadDynamicPartition to fail.", false), + HIVE_TEST_MODE_FAIL_HEARTBEATER("hive.test.fail.heartbeater", false, "For testing only. Will cause Heartbeater to fail.", false), TESTMODE_BUCKET_CODEC_VERSION("hive.test.bucketcodec.version", 1, "For testing only. Will make ACID subsystem write RecordIdentifier.bucketId in specified\n" + "format", false), HIVE_EXTEND_BUCKET_ID_RANGE("hive.extend.bucketid.range", true, "Dynamically allocate some bits from statement id when bucket id overflows. This allows having more than 4096 buckets."), - HIVETESTMODEACIDKEYIDXSKIP("hive.test.acid.key.index.skip", false, "For testing only. OrcRecordUpdater will skip " + HIVE_TEST_MODE_ACID_KEY_IDX_SKIP("hive.test.acid.key.index.skip", false, "For testing only. OrcRecordUpdater will skip " + "generation of the hive.acid.key.index", false), - HIVEMERGEMAPFILES("hive.merge.mapfiles", true, + HIVE_MERGE_MAPFILES("hive.merge.mapfiles", true, "Merge small files at the end of a map-only job"), - HIVEMERGEMAPREDFILES("hive.merge.mapredfiles", false, + HIVE_MERGE_MAPRED_FILES("hive.merge.mapredfiles", false, "Merge small files at the end of a map-reduce job"), - HIVEMERGETEZFILES("hive.merge.tezfiles", false, "Merge small files at the end of a Tez DAG"), - HIVEMERGEMAPFILESSIZE("hive.merge.size.per.task", (long) (256 * 1000 * 1000), + HIVE_MERGE_TEZ_FILES("hive.merge.tezfiles", false, "Merge small files at the end of a Tez DAG"), + HIVE_MERGE_MAP_FILES_SIZE("hive.merge.size.per.task", (long) (256 * 1000 * 1000), "Size of merged files at the end of the job"), - HIVEMERGEMAPFILESAVGSIZE("hive.merge.smallfiles.avgsize", (long) (16 * 1000 * 1000), + HIVE_MERGE_MAP_FILES_AVG_SIZE("hive.merge.smallfiles.avgsize", (long) (16 * 1000 * 1000), "When the average output file size of a job is less than this number, Hive will start an additional \n" + "map-reduce job to merge the output files into bigger files. This is only done for map-only jobs \n" + "if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true."), - HIVEMERGERCFILEBLOCKLEVEL("hive.merge.rcfile.block.level", true, ""), - HIVEMERGEORCFILESTRIPELEVEL("hive.merge.orcfile.stripe.level", true, + HIVE_MERGE_RCFILE_BLOCK_LEVEL("hive.merge.rcfile.block.level", true, ""), + HIVE_MERGE_ORC_FILE_STRIPE_LEVEL("hive.merge.orcfile.stripe.level", true, "When hive.merge.mapfiles, hive.merge.mapredfiles or hive.merge.tezfiles is enabled\n" + "while writing a table with ORC file format, enabling this config will do stripe-level\n" + "fast merge for small ORC files. 
Note that enabling this config will not honor the\n" + @@ -2229,12 +2230,14 @@ public static enum ConfVars { HIVE_ICEBERG_MASK_DEFAULT_LOCATION("hive.iceberg.mask.default.location", false, "If this is set to true the URI for auth will have the default location masked with DEFAULT_TABLE_LOCATION"), + HIVE_ICEBERG_ALLOW_DATAFILES_IN_TABLE_LOCATION_ONLY("hive.iceberg.allow.datafiles.in.table.location.only", false, + "If this is set to true, then all the data files being read should be within the table location"), - HIVEUSEEXPLICITRCFILEHEADER("hive.exec.rcfile.use.explicit.header", true, + HIVE_USE_EXPLICIT_RCFILE_HEADER("hive.exec.rcfile.use.explicit.header", true, "If this is set the header for RCFiles will simply be RCF. If this is not\n" + "set the header will be that borrowed from sequence files, e.g. SEQ- followed\n" + "by the input and output RCFile formats."), - HIVEUSERCFILESYNCCACHE("hive.exec.rcfile.use.sync.cache", true, ""), + HIVE_USE_RCFILE_SYNC_CACHE("hive.exec.rcfile.use.sync.cache", true, ""), HIVE_RCFILE_RECORD_INTERVAL("hive.io.rcfile.record.interval", Integer.MAX_VALUE, ""), HIVE_RCFILE_COLUMN_NUMBER_CONF("hive.io.rcfile.column.number.conf", 0, ""), @@ -2370,109 +2373,122 @@ public static enum ConfVars { "The default is false, which means only 'TRUE' and 'FALSE' are treated as legal\n" + "boolean literal."), - HIVESKEWJOIN("hive.optimize.skewjoin", false, + HIVE_SKEW_JOIN("hive.optimize.skewjoin", false, "Whether to enable skew join optimization. \n" + "The algorithm is as follows: At runtime, detect the keys with a large skew. Instead of\n" + "processing those keys, store them temporarily in an HDFS directory. In a follow-up map-reduce\n" + "job, process those skewed keys. The same key need not be skewed for all the tables, and so,\n" + "the follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a\n" + "map-join."), - HIVEDYNAMICPARTITIONHASHJOIN("hive.optimize.dynamic.partition.hashjoin", false, + HIVE_DYNAMIC_PARTITION_HASHJOIN("hive.optimize.dynamic.partition.hashjoin", false, "Whether to enable dynamically partitioned hash join optimization. \n" + "This setting is also dependent on enabling hive.auto.convert.join"), - HIVECONVERTJOIN("hive.auto.convert.join", true, + HIVE_CONVERT_JOIN("hive.auto.convert.join", true, "Whether Hive enables the optimization about converting common join into mapjoin based on the input file size"), - HIVECONVERTJOINNOCONDITIONALTASK("hive.auto.convert.join.noconditionaltask", true, + HIVE_CONVERT_JOIN_NOCONDITIONALTASK("hive.auto.convert.join.noconditionaltask", true, "Whether Hive enables the optimization about converting common join into mapjoin based on the input file size. \n" + "If this parameter is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the\n" + "specified size, the join is directly converted to a mapjoin (there is no conditional task)."), HIVE_CONVERT_ANTI_JOIN("hive.auto.convert.anti.join", true, "Whether Hive enables the optimization about converting join with null filter to anti join."), - HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD("hive.auto.convert.join.noconditionaltask.size", + HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD("hive.auto.convert.join.noconditionaltask.size", 10000000L, "If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. 
\n" + "However, if it is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, \n" + "the join is directly converted to a mapjoin(there is no conditional task). The default is 10MB"), - HIVECONVERTJOINUSENONSTAGED("hive.auto.convert.join.use.nonstaged", false, + HIVE_CONVERT_JOIN_USE_NONSTAGED("hive.auto.convert.join.use.nonstaged", false, "For conditional joins, if input stream from a small alias can be directly applied to join operator without \n" + "filtering or projection, the alias need not to be pre-staged in distributed cache via mapred local task.\n" + "Currently, this is not working with vectorization or tez execution engine."), - HIVESKEWJOINKEY("hive.skewjoin.key", 100000, + HIVE_SKEWJOIN_KEY("hive.skewjoin.key", 100000, "Determine if we get a skew key in join. If we see more than the specified number of rows with the same key in join operator,\n" + "we think the key as a skew join key. "), - HIVESKEWJOINMAPJOINNUMMAPTASK("hive.skewjoin.mapjoin.map.tasks", 10000, + HIVE_SKEWJOIN_MAPJOIN_NUM_MAP_TASK("hive.skewjoin.mapjoin.map.tasks", 10000, "Determine the number of map task used in the follow up map join job for a skew join.\n" + "It should be used together with hive.skewjoin.mapjoin.min.split to perform a fine-grained control."), - HIVESKEWJOINMAPJOINMINSPLIT("hive.skewjoin.mapjoin.min.split", 33554432L, + HIVE_SKEWJOIN_MAPJOIN_MIN_SPLIT("hive.skewjoin.mapjoin.min.split", 33554432L, "Determine the number of map task at most used in the follow up map join job for a skew join by specifying \n" + "the minimum split size. It should be used together with hive.skewjoin.mapjoin.map.tasks to perform a fine-grained control."), - HIVESENDHEARTBEAT("hive.heartbeat.interval", 1000, + HIVE_SEND_HEARTBEAT("hive.heartbeat.interval", 1000, "Send a heartbeat after this interval - used by mapjoin and filter operators"), - HIVELIMITMAXROWSIZE("hive.limit.row.max.size", 100000L, + HIVE_LIMIT_MAX_ROW_SIZE("hive.limit.row.max.size", 100000L, "When trying a smaller subset of data for simple LIMIT, how much size we need to guarantee each row to have at least."), - HIVELIMITOPTLIMITFILE("hive.limit.optimize.limit.file", 10, - "When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample."), - HIVELIMITOPTENABLE("hive.limit.optimize.enable", false, + HIVE_LIMIT_OPT_LIMIT_FILE("hive.limit.optimize.limit.file", 10, + "When trying a smaller subset of data for simple LIMIT, maximum number of fil:wq:es we can sample."), + HIVE_LIMIT_OPT_ENABLE("hive.limit.optimize.enable", false, "Whether to enable to optimization to trying a smaller subset of data for simple LIMIT first."), - HIVELIMITOPTMAXFETCH("hive.limit.optimize.fetch.max", 50000, + HIVE_LIMIT_OPT_MAX_FETCH("hive.limit.optimize.fetch.max", 50000, "Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. 
\n" + "Insert queries are not restricted by this limit."), - HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", 0.1f, new RatioValidator(), + HIVE_LIMIT_PUSHDOWN_MEMORY_USAGE("hive.limit.pushdown.memory.usage", 0.1f, new RatioValidator(), "The fraction of available memory to be used for buffering rows in Reducesink operator for limit pushdown optimization."), - HIVECONVERTJOINMAXENTRIESHASHTABLE("hive.auto.convert.join.hashtable.max.entries", 21000000L, + HIVE_CONVERT_JOIN_MAX_ENTRIES_HASHTABLE("hive.auto.convert.join.hashtable.max.entries", 21000000L, "If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" + "However, if it is on, and the predicted number of entries in hashtable for a given join \n" + "input is larger than this number, the join will not be converted to a mapjoin. \n" + "The value \"-1\" means no limit."), - XPRODSMALLTABLEROWSTHRESHOLD("hive.xprod.mapjoin.small.table.rows", 1,"Maximum number of rows on build side" + XPROD_SMALL_TABLE_ROWS_THRESHOLD("hive.xprod.mapjoin.small.table.rows", 1,"Maximum number of rows on build side" + " of map join before it switches over to cross product edge"), - HIVECONVERTJOINMAXSHUFFLESIZE("hive.auto.convert.join.shuffle.max.size", 10000000000L, + HIVE_CONVERT_JOIN_MAX_SHUFFLE_SIZE("hive.auto.convert.join.shuffle.max.size", 10000000000L, "If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" + "However, if it is on, and the predicted size of the larger input for a given join is greater \n" + "than this number, the join will not be converted to a dynamically partitioned hash join. \n" + "The value \"-1\" means no limit."), - HIVEHASHTABLEKEYCOUNTADJUSTMENT("hive.hashtable.key.count.adjustment", 0.99f, + HIVE_HASHTABLE_KEY_COUNT_ADJUSTMENT("hive.hashtable.key.count.adjustment", 0.99f, "Adjustment to mapjoin hashtable size derived from table and column statistics; the estimate" + " of the number of keys is divided by this value. If the value is 0, statistics are not used" + "and hive.hashtable.initialCapacity is used instead."), - HIVEHASHTABLETHRESHOLD("hive.hashtable.initialCapacity", 100000, "Initial capacity of " + + HIVE_HASHTABLE_THRESHOLD("hive.hashtable.initialCapacity", 100000, "Initial capacity of " + "mapjoin hashtable if statistics are absent, or if hive.hashtable.key.count.adjustment is set to 0"), - HIVEHASHTABLELOADFACTOR("hive.hashtable.loadfactor", (float) 0.75, ""), - HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE("hive.mapjoin.followby.gby.localtask.max.memory.usage", (float) 0.55, + HIVE_HASHTABLE_LOAD_FACTOR("hive.hashtable.loadfactor", (float) 0.75, ""), + HIVE_HASHTABLE_FOLLOWBY_GBY_MAX_MEMORY_USAGE("hive.mapjoin.followby.gby.localtask.max.memory.usage", (float) 0.55, "This number means how much memory the local task can take to hold the key/value into an in-memory hash table \n" + "when this map join is followed by a group by. If the local task's memory usage is more than this number, \n" + "the local task will abort by itself. It means the data of the small table is too large " + "to be held in memory."), - HIVEHASHTABLEMAXMEMORYUSAGE("hive.mapjoin.localtask.max.memory.usage", (float) 0.90, + HIVE_HASHTABLE_MAX_MEMORY_USAGE("hive.mapjoin.localtask.max.memory.usage", (float) 0.90, "This number means how much memory the local task can take to hold the key/value into an in-memory hash table. \n" + "If the local task's memory usage is more than this number, the local task will abort by itself. 
\n" + "It means the data of the small table is too large to be held in memory."), - HIVEHASHTABLESCALE("hive.mapjoin.check.memory.rows", (long)100000, + HIVE_HASHTABLE_SCALE("hive.mapjoin.check.memory.rows", (long)100000, "The number means after how many rows processed it needs to check the memory usage"), - HIVEINPUTFORMAT("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat", + HIVE_INPUT_FORMAT("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat", "The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat."), - HIVETEZINPUTFORMAT("hive.tez.input.format", "org.apache.hadoop.hive.ql.io.HiveInputFormat", + HIVE_TEZ_INPUT_FORMAT("hive.tez.input.format", "org.apache.hadoop.hive.ql.io.HiveInputFormat", "The default input format for tez. Tez groups splits in the AM."), - HIVETEZCONTAINERSIZE("hive.tez.container.size", -1, - "By default Tez will spawn containers of the size of a mapper. This can be used to overwrite."), - HIVETEZCPUVCORES("hive.tez.cpu.vcores", -1, + HIVE_TEZ_CONTAINER_SIZE("hive.tez.container.size", -1, + "The memory in MB that's used by a Tez task container (TezChild) in Tez container mode. Hive uses this \n" + + "property to create a Resource object which is accepted by Yarn (and used in TezAM to ask for TezChild \n" + + "containers). This should be distinguished from the Tez AM's (DAGAppMaster) memory, \n" + + "which is driven by tez.am.resource.memory.mb! \n" + + "Also, as Hive takes care of TezChild memory by setting this option, there is no need \n " + + "to set tez.task.resource.memory.mb differently. \n" + + "The final -Xmx arg for TezChild process is not equal to this setting, \n " + + "because Tez considers a heap fraction (80%), so by default: \n" + + "Xmx = hive.tez.container.size * tez.container.max.java.heap.fraction. \n" + + "In case of values <= 0, container size falls back to mapreduce.map.memory.mb. \n" + + "LLAP notes: while generating splits, the needed per-task resource is derived from this option \n" + + "(refer to HiveSplitGenerator, TezAvailableSlotsCalculator), so even if its value doesn't change the \n" + + "LLAP daemons' total physical size, it has to be configured properly. In this context \n" + + "4096 implies that you assume a single task will consume 4096MB from a daemon's shared heap."), + HIVE_TEZ_CPU_VCORES("hive.tez.cpu.vcores", -1, "By default Tez will ask for however many cpus map-reduce is configured to use per container.\n" + "This can be used to overwrite."), - HIVETEZJAVAOPTS("hive.tez.java.opts", null, + HIVE_TEZ_JAVA_OPTS("hive.tez.java.opts", null, "By default Tez will use the Java options from map tasks. This can be used to overwrite."), - HIVETEZLOGLEVEL("hive.tez.log.level", "INFO", + HIVE_TEZ_LOG_LEVEL("hive.tez.log.level", "INFO", "The log level to use for tasks executing as part of the DAG.\n" + "Used only if hive.tez.java.opts is used to configure Java options."), - HIVETEZHS2USERACCESS("hive.tez.hs2.user.access", true, + HIVE_TEZ_HS2_USER_ACCESS("hive.tez.hs2.user.access", true, "Whether to grant access to the hs2/hive user for queries"), - HIVEQUERYNAME ("hive.query.name", null, + HIVE_QUERY_NAME("hive.query.name", null, "This named is used by Tez to set the dag name. This name in turn will appear on \n" + "the Tez UI representing the work that was done."), - HIVETEZJOBNAME("tez.job.name", "HIVE-%s", + HIVE_TEZ_JOB_NAME("tez.job.name", "HIVE-%s", "This named is used by Tez to set the job name. 
This name in turn will appear on \n" + "the Yarn UI representing the Yarn Application Name. And The job name may be a \n" + "Java String.format() string, to which the session ID will be supplied as the single parameter."), @@ -2488,15 +2504,15 @@ public static enum ConfVars { "This time slice should align with the flush interval of the logging library else file pruning may\n" + "incorrectly prune files leading to incorrect results from sys.logs table."), - HIVEOPTIMIZEBUCKETINGSORTING("hive.optimize.bucketingsorting", true, + HIVE_OPTIMIZE_BUCKETING_SORTING("hive.optimize.bucketingsorting", true, "Don't create a reducer for enforcing \n" + "bucketing/sorting for queries of the form: \n" + "insert overwrite table T2 select * from T1;\n" + "where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets."), - HIVEPARTITIONER("hive.mapred.partitioner", "org.apache.hadoop.hive.ql.io.DefaultHivePartitioner", ""), - HIVEENFORCESORTMERGEBUCKETMAPJOIN("hive.enforce.sortmergebucketmapjoin", false, + HIVE_PARTITIONER("hive.mapred.partitioner", "org.apache.hadoop.hive.ql.io.DefaultHivePartitioner", ""), + HIVE_ENFORCE_SORT_MERGE_BUCKET_MAPJOIN("hive.enforce.sortmergebucketmapjoin", false, "If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not ?"), - HIVEENFORCEBUCKETMAPJOIN("hive.enforce.bucketmapjoin", false, + HIVE_ENFORCE_BUCKET_MAPJOIN("hive.enforce.bucketmapjoin", false, "If the user asked for bucketed map-side join, and it cannot be performed, \n" + "should the query fail or not ? For example, if the buckets in the tables being joined are\n" + "not a multiple of each other, bucketed map-side join cannot be performed, and the\n" + @@ -2536,24 +2552,24 @@ public static enum ConfVars { "with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster\n" + "if the complete small table can fit in memory, and a map-join can be performed."), - HIVESCRIPTOPERATORTRUST("hive.exec.script.trust", false, ""), - HIVEROWOFFSET("hive.exec.rowoffset", false, + HIVE_SCRIPT_OPERATOR_TRUST("hive.exec.script.trust", false, ""), + HIVE_ROW_OFFSET("hive.exec.rowoffset", false, "Whether to provide the row offset virtual column"), // Optimizer - HIVEOPTINDEXFILTER("hive.optimize.index.filter", true, "Whether to enable automatic use of indexes"), + HIVE_OPT_INDEX_FILTER("hive.optimize.index.filter", true, "Whether to enable automatic use of indexes"), - HIVEOPTPPD("hive.optimize.ppd", true, + HIVE_OPT_PPD("hive.optimize.ppd", true, "Whether to enable predicate pushdown"), - HIVEOPTPPD_WINDOWING("hive.optimize.ppd.windowing", true, + HIVE_OPT_PPD_WINDOWING("hive.optimize.ppd.windowing", true, "Whether to enable predicate pushdown through windowing"), - HIVEPPDRECOGNIZETRANSITIVITY("hive.ppd.recognizetransivity", true, + HIVE_PPD_RECOGNIZE_TRANSITIVITY("hive.ppd.recognizetransivity", true, "Whether to transitively replicate predicate filters over equijoin conditions."), - HIVEPPD_RECOGNIZE_COLUMN_EQUALITIES("hive.ppd.recognize.column.equalities", true, + HIVE_PPD_RECOGNIZE_COLUMN_EQUALITIES("hive.ppd.recognize.column.equalities", true, "Whether we should traverse the join branches to discover transitive propagation opportunities over" + " equijoin conditions. 
\n" + "Requires hive.ppd.recognizetransivity to be set to true."), - HIVEPPDREMOVEDUPLICATEFILTERS("hive.ppd.remove.duplicatefilters", true, + HIVE_PPD_REMOVE_DUPLICATE_FILTERS("hive.ppd.remove.duplicatefilters", true, "During query optimization, filters may be pushed down in the operator tree. \n" + "If this config is true only pushed down filters remain in the operator tree, \n" + "and the original filter is removed. If this config is false, the original filter \n" + @@ -2562,44 +2578,44 @@ public static enum ConfVars { true, "Whether to transitively infer disjunctive predicates across joins. \n" + "Disjunctive predicates are hard to simplify and pushing them down might lead to infinite rule matching " + "causing stackoverflow and OOM errors"), - HIVEPOINTLOOKUPOPTIMIZER("hive.optimize.point.lookup", true, + HIVE_POINT_LOOKUP_OPTIMIZER("hive.optimize.point.lookup", true, "Whether to transform OR clauses in Filter operators into IN clauses"), - HIVEPOINTLOOKUPOPTIMIZERMIN("hive.optimize.point.lookup.min", 2, + HIVE_POINT_LOOKUP_OPTIMIZER_MIN("hive.optimize.point.lookup.min", 2, "Minimum number of OR clauses needed to transform into IN clauses"), HIVEOPT_TRANSFORM_IN_MAXNODES("hive.optimize.transform.in.maxnodes", 16, "Maximum number of IN expressions beyond which IN will not be transformed into OR clause"), - HIVECOUNTDISTINCTOPTIMIZER("hive.optimize.countdistinct", true, + HIVE_COUNT_DISTINCT_OPTIMIZER("hive.optimize.countdistinct", true, "Whether to transform count distinct into two stages"), - HIVEPARTITIONCOLUMNSEPARATOR("hive.optimize.partition.columns.separate", true, + HIVE_PARTITION_COLUMN_SEPARATOR("hive.optimize.partition.columns.separate", true, "Extract partition columns from IN clauses"), // Constant propagation optimizer - HIVEOPTCONSTANTPROPAGATION("hive.optimize.constant.propagation", true, "Whether to enable constant propagation optimizer"), - HIVEIDENTITYPROJECTREMOVER("hive.optimize.remove.identity.project", true, "Removes identity project from operator tree"), - HIVEMETADATAONLYQUERIES("hive.optimize.metadataonly", false, + HIVE_OPT_CONSTANT_PROPAGATION("hive.optimize.constant.propagation", true, "Whether to enable constant propagation optimizer"), + HIVE_IDENTITY_PROJECT_REMOVER("hive.optimize.remove.identity.project", true, "Removes identity project from operator tree"), + HIVE_METADATA_ONLY_QUERIES("hive.optimize.metadataonly", false, "Whether to eliminate scans of the tables from which no columns are selected. Note\n" + "that, when selecting from empty tables with data files, this can produce incorrect\n" + "results, so it's disabled by default. 
It works correctly for normal tables."), - HIVENULLSCANOPTIMIZE("hive.optimize.null.scan", true, "Don't scan relations which are guaranteed to not generate any rows"), - HIVEOPTPPD_STORAGE("hive.optimize.ppd.storage", true, + HIVE_NULL_SCAN_OPTIMIZE("hive.optimize.null.scan", true, "Don't scan relations which are guaranteed to not generate any rows"), + HIVE_OPT_PPD_STORAGE("hive.optimize.ppd.storage", true, "Whether to push predicates down to storage handlers"), - HIVEOPTGROUPBY("hive.optimize.groupby", true, + HIVE_OPT_GROUPBY("hive.optimize.groupby", true, "Whether to enable the bucketed group by from bucketed partitions/tables."), - HIVEOPTBUCKETMAPJOIN("hive.optimize.bucketmapjoin", false, + HIVE_OPT_BUCKET_MAPJOIN("hive.optimize.bucketmapjoin", false, "Whether to try bucket mapjoin"), - HIVEOPTSORTMERGEBUCKETMAPJOIN("hive.optimize.bucketmapjoin.sortedmerge", false, + HIVE_OPT_SORT_MERGE_BUCKET_MAPJOIN("hive.optimize.bucketmapjoin.sortedmerge", false, "Whether to try sorted bucket merge map join"), - HIVEOPTREDUCEDEDUPLICATION("hive.optimize.reducededuplication", true, + HIVE_OPT_REDUCE_DEDUPLICATION("hive.optimize.reducededuplication", true, "Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again. \n" + "This should always be set to true. Since it is a new feature, it has been made configurable."), - HIVEOPTREDUCEDEDUPLICATIONMINREDUCER("hive.optimize.reducededuplication.min.reducer", 4, + HIVE_OPT_REDUCE_DEDUPLICATION_MIN_REDUCER("hive.optimize.reducededuplication.min.reducer", 4, "Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS. \n" + "That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.\n" + "The optimization will be automatically disabled if number of reducers would be less than specified value."), - HIVEOPTJOINREDUCEDEDUPLICATION("hive.optimize.joinreducededuplication", true, + HIVE_OPT_JOIN_REDUCE_DEDUPLICATION("hive.optimize.joinreducededuplication", true, "Remove extra shuffle/sorting operations after join algorithm selection has been executed. \n" + "Currently it only works with Apache Tez. This should always be set to true. 
\n" + "Since it is a new feature, it has been made configurable."), - HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD("hive.optimize.sort.dynamic.partition.threshold", 0, + HIVE_OPT_SORT_DYNAMIC_PARTITION_THRESHOLD("hive.optimize.sort.dynamic.partition.threshold", 0, "When enabled dynamic partitioning column will be globally sorted.\n" + "This way we can keep only one record writer open for each partition value\n" + "in the reducer thereby reducing the memory pressure on reducers.\n" + @@ -2610,13 +2626,13 @@ public static enum ConfVars { "Setting it to any other positive integer will make Hive use this as threshold for number of writers."), - HIVESAMPLINGFORORDERBY("hive.optimize.sampling.orderby", false, "Uses sampling on order-by clause for parallel execution."), - HIVESAMPLINGNUMBERFORORDERBY("hive.optimize.sampling.orderby.number", 1000, "Total number of samples to be obtained."), - HIVESAMPLINGPERCENTFORORDERBY("hive.optimize.sampling.orderby.percent", 0.1f, new RatioValidator(), + HIVE_SAMPLING_FOR_ORDERBY("hive.optimize.sampling.orderby", false, "Uses sampling on order-by clause for parallel execution."), + HIVE_SAMPLING_NUMBER_FOR_ORDERBY("hive.optimize.sampling.orderby.number", 1000, "Total number of samples to be obtained."), + HIVE_SAMPLING_PERCENT_FOR_ORDERBY("hive.optimize.sampling.orderby.percent", 0.1f, new RatioValidator(), "Probability with which a row will be chosen."), HIVE_REMOVE_ORDERBY_IN_SUBQUERY("hive.remove.orderby.in.subquery", true, "If set to true, order/sort by without limit in sub queries will be removed."), - HIVEOPTIMIZEDISTINCTREWRITE("hive.optimize.distinct.rewrite", true, "When applicable this " + HIVE_OPTIMIZE_DISTINCT_REWRITE("hive.optimize.distinct.rewrite", true, "When applicable this " + "optimization rewrites distinct aggregates from a single stage to multi-stage " + "aggregation. This may not be optimal in all cases. Ideally, whether to trigger it or " + "not should be cost based decision. Until Hive formalizes cost model for this, this is config driven."), @@ -2637,7 +2653,7 @@ public static enum ConfVars { "By default, when writing data into a table and UNION ALL is the last step of the query, Hive on Tez will\n" + "create a subdirectory for each branch of the UNION ALL. When this property is enabled,\n" + "the subdirectories are removed, and the files are renamed and moved to the parent directory"), - HIVEOPTCORRELATION("hive.optimize.correlation", false, "exploit intra-query correlations."), + HIVE_OPT_CORRELATION("hive.optimize.correlation", false, "exploit intra-query correlations."), HIVE_OPTIMIZE_LIMIT_TRANSPOSE("hive.optimize.limittranspose", false, "Whether to push a limit through left/right outer join or union. 
If the value is true and the size of the outer\n" + @@ -2797,11 +2813,11 @@ public static enum ConfVars { "Set this to true to use approximation based logic to adjust ndv after join."), HIVE_STATS_NUM_NULLS_ESTIMATE_PERC("hive.stats.num.nulls.estimate.percent", (float)5, "This many percentage of rows will be estimated as number of nulls in absence of statistics."), - HIVESTATSAUTOGATHER("hive.stats.autogather", true, + HIVE_STATS_AUTOGATHER("hive.stats.autogather", true, "A flag to gather statistics (only basic) automatically during the INSERT OVERWRITE command."), - HIVESTATSCOLAUTOGATHER("hive.stats.column.autogather", true, + HIVE_STATS_COL_AUTOGATHER("hive.stats.column.autogather", true, "A flag to gather column statistics automatically."), - HIVESTATSDBCLASS("hive.stats.dbclass", "fs", new PatternSet("custom", "fs"), + HIVE_STATS_DBCLASS("hive.stats.dbclass", "fs", new PatternSet("custom", "fs"), "The storage that stores temporary Hive statistics. In filesystem based statistics collection ('fs'), \n" + "each task writes statistics it has collected in a file on the filesystem, which will be aggregated \n" + "after the job has finished. Supported values are fs (filesystem) and custom as defined in StatsSetupConst.java."), // StatsSetupConst.StatDB @@ -3546,9 +3562,9 @@ public static enum ConfVars { "This value is only used for a given table if the kudu.master_addresses table property is not set."), // For har files - HIVEARCHIVEENABLED("hive.archive.enabled", false, "Whether archiving operations are permitted"), + HIVE_ARCHIVE_ENABLED("hive.archive.enabled", false, "Whether archiving operations are permitted"), - HIVEFETCHTASKCONVERSION("hive.fetch.task.conversion", "more", new StringSet("none", "minimal", "more"), + HIVE_FETCH_TASK_CONVERSION("hive.fetch.task.conversion", "more", new StringSet("none", "minimal", "more"), "Some select queries can be converted to single FETCH task minimizing latency.\n" + "Currently the query should be single sourced not having any subquery and should not have\n" + "any aggregations or distincts (which incurs RS), lateral views and joins.\n" + @@ -3556,30 +3572,30 @@ public static enum ConfVars { "1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only\n" + "2. more : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns)" ), - HIVEFETCHTASKCACHING("hive.fetch.task.caching", true, + HIVE_FETCH_TASK_CACHING("hive.fetch.task.caching", true, "Enabling the caching of the result of fetch tasks eliminates the chance of running into a failing read." + " On the other hand, if enabled, the hive.fetch.task.conversion.threshold must be adjusted accordingly. That" + " is 200MB by default which must be lowered in case of enabled caching to prevent the consumption of too much memory."), - HIVEFETCHTASKCONVERSIONTHRESHOLD("hive.fetch.task.conversion.threshold", 209715200L, + HIVE_FETCH_TASK_CONVERSION_THRESHOLD("hive.fetch.task.conversion.threshold", 209715200L, "Input threshold for applying hive.fetch.task.conversion. If target table is native, input length\n" + "is calculated by summation of file lengths. If it's not native, storage handler for the table\n" + "can optionally implement org.apache.hadoop.hive.ql.metadata.InputEstimator interface."), - HIVEFETCHTASKAGGR("hive.fetch.task.aggr", false, + HIVE_FETCH_TASK_AGGR("hive.fetch.task.aggr", false, "Aggregation queries with no group-by clause (for example, select count(*) from src) execute\n" + "final aggregations in single reduce task. 
If this is set true, Hive delegates final aggregation\n" + "stage to fetch task, possibly decreasing the query time."), - HIVEOPTIMIZEMETADATAQUERIES("hive.compute.query.using.stats", true, + HIVE_OPTIMIZE_METADATA_QUERIES("hive.compute.query.using.stats", true, "When set to true Hive will answer a few queries like count(1) purely using stats\n" + "stored in metastore. For basic stats collection turn on the config hive.stats.autogather to true.\n" + "For more advanced stats collection need to run analyze table queries."), // Serde for FetchTask - HIVEFETCHOUTPUTSERDE("hive.fetch.output.serde", "org.apache.hadoop.hive.serde2.DelimitedJSONSerDe", + HIVE_FETCH_OUTPUT_SERDE("hive.fetch.output.serde", "org.apache.hadoop.hive.serde2.DelimitedJSONSerDe", "The SerDe used by FetchTask to serialize the fetch output."), - HIVEEXPREVALUATIONCACHE("hive.cache.expr.evaluation", true, + HIVE_EXPR_EVALUATION_CACHE("hive.cache.expr.evaluation", true, "If true, the evaluation result of a deterministic expression referenced twice or more\n" + "will be cached.\n" + "For example, in a filter condition like '.. where key + 10 = 100 or key + 10 = 0'\n" + @@ -3588,12 +3604,12 @@ public static enum ConfVars { "or filter operators."), // Hive Variables - HIVEVARIABLESUBSTITUTE("hive.variable.substitute", true, + HIVE_VARIABLE_SUBSTITUTE("hive.variable.substitute", true, "This enables substitution using syntax like ${var} ${system:var} and ${env:var}."), - HIVEVARIABLESUBSTITUTEDEPTH("hive.variable.substitute.depth", 40, + HIVE_VARIABLE_SUBSTITUTE_DEPTH("hive.variable.substitute.depth", 40, "The maximum replacements the substitution engine will do."), - HIVECONFVALIDATION("hive.conf.validation", true, + HIVE_CONF_VALIDATION("hive.conf.validation", true, "Enables type checking for registered Hive configurations"), SEMANTIC_ANALYZER_HOOK("hive.semantic.analyzer.hook", "", ""), @@ -3677,8 +3693,12 @@ public static enum ConfVars { HIVE_CLI_PRINT_ESCAPE_CRLF("hive.cli.print.escape.crlf", false, "Whether to print carriage returns and line feeds in row output as escaped \\r and \\n"), + HIVE_CLI_TEZ_INITIALIZE_SESSION("hive.cli.tez.initialize.session", true, + "When enabled, CLI running with Tez will preemptively open a tez session during start up."), + HIVE_CLI_TEZ_SESSION_ASYNC("hive.cli.tez.session.async", true, "Whether to start Tez\n" + - "session in background when running CLI with Tez, allowing CLI to be available earlier."), + "session in background when running CLI with Tez, allowing CLI to be available earlier. " + + "If hive.cli.tez.initialize.session is set to false, this value is ignored."), HIVE_DISABLE_UNSAFE_EXTERNALTABLE_OPERATIONS("hive.disable.unsafe.external.table.operations", true, "Whether to disable certain optimizations and operations on external tables," + @@ -4548,7 +4568,7 @@ public static enum ConfVars { " it will now take 512 reducers, similarly if the max number of reducers is 511,\n" + " and a job was going to use this many, it will now use 256 reducers."), - HIVEOPTLISTBUCKETING("hive.optimize.listbucketing", false, + HIVE_OPT_LIST_BUCKETING("hive.optimize.listbucketing", false, "Enable list bucketing optimizer. Default value is false so that we disable it by default."), // Allow TCP Keep alive socket option for for HiveServer or a maximum timeout for the socket. 
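Side note on the two CLI/Tez session flags added in the HiveConf hunk above: the new hive.cli.tez.initialize.session flag gates the reworded hive.cli.tez.session.async flag. What follows is a minimal illustrative sketch, not code from this patch; the class name and the gating logic are assumptions for illustration, but the static HiveConf accessors are the same ones used elsewhere in this diff.

import org.apache.hadoop.hive.conf.HiveConf;

public class TezCliSessionFlagsSketch {
  public static void main(String[] args) {
    // Picks up hive-site.xml plus the built-in defaults declared in ConfVars.
    HiveConf conf = new HiveConf();
    // Added by this patch: whether the CLI should preemptively open a Tez session at startup.
    boolean initializeSession =
        HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CLI_TEZ_INITIALIZE_SESSION);
    // Per the updated description, the async flag is ignored when the initialize flag is false.
    boolean startInBackground = initializeSession
        && HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CLI_TEZ_SESSION_ASYNC);
    System.out.println("open Tez session at CLI startup: " + initializeSession
        + ", start it in the background: " + startInBackground);
  }
}

Callers that used the pre-rename constants elsewhere in this file read the same way after the patch; only the ConfVars enum names change, not the getVar/getBoolVar accessor signatures.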
@@ -4632,7 +4652,7 @@ public static enum ConfVars { "The default value is true."), HIVE_VECTORIZATION_USE_ROW_DESERIALIZE("hive.vectorized.use.row.serde.deserialize", true, "This flag should be set to true to enable vectorizing using row deserialize.\n" + - "The default value is false."), + "The default value is true."), HIVE_VECTORIZATION_ROW_DESERIALIZE_INPUTFORMAT_EXCLUDES( "hive.vectorized.row.serde.inputformat.excludes", "org.apache.parquet.hadoop.ParquetInputFormat,org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat", @@ -4738,11 +4758,11 @@ public static enum ConfVars { "Whether to generate consistent split locations when generating splits in the AM"), HIVE_PREWARM_ENABLED("hive.prewarm.enabled", false, "Enables container prewarm for Tez(Hadoop 2 only)"), HIVE_PREWARM_NUM_CONTAINERS("hive.prewarm.numcontainers", 10, "Controls the number of containers to prewarm for Tez (Hadoop 2 only)"), - HIVESTAGEIDREARRANGE("hive.stageid.rearrange", "none", new StringSet("none", "idonly", "traverse", "execution"), ""), - HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES("hive.explain.dependency.append.tasktype", false, ""), - HIVEUSEGOOGLEREGEXENGINE("hive.use.googleregex.engine",false,"whether to use google regex engine or not, default regex engine is java.util.regex"), + HIVE_STAGE_ID_REARRANGE("hive.stageid.rearrange", "none", new StringSet("none", "idonly", "traverse", "execution"), ""), + HIVE_EXPLAIN_DEPENDENCY_APPEND_TASK_TYPES("hive.explain.dependency.append.tasktype", false, ""), + HIVE_USE_GOOGLE_REGEX_ENGINE("hive.use.googleregex.engine",false,"whether to use google regex engine or not, default regex engine is java.util.regex"), - HIVECOUNTERGROUP("hive.counters.group.name", "HIVE", + HIVE_COUNTER_GROUP("hive.counters.group.name", "HIVE", "The name of counter group for internal Hive variables (CREATED_FILE, FATAL_ERROR, etc.)"), HIVE_QUOTEDID_SUPPORT("hive.support.quoted.identifiers", "column", @@ -5494,7 +5514,7 @@ public static enum ConfVars { "validated for all SQL operations after every defined interval (default: 500ms) and corresponding action\n" + "defined in the trigger will be taken"), - NWAYJOINREORDER("hive.reorder.nway.joins", true, + N_WAY_JOIN_REORDER("hive.reorder.nway.joins", true, "Runs reordering of tables within single n-way join (i.e.: picks streamtable)"), HIVE_MERGE_NWAY_JOINS("hive.merge.nway.joins", false, "Merge adjacent joins into a single n-way join"), @@ -5575,10 +5595,12 @@ public static enum ConfVars { "hive.zookeeper.ssl.keystore.type," + "hive.zookeeper.ssl.truststore.location," + "hive.zookeeper.ssl.truststore.password," + - "hive.zookeeper.ssl.truststore.type", + "hive.zookeeper.ssl.truststore.type," + + "hive.iceberg.allow.datafiles.in.table.location.only," + + "hive.rewrite.data.policy", "Comma separated list of configuration options which are immutable at runtime"), HIVE_CONF_HIDDEN_LIST("hive.conf.hidden.list", - METASTOREPWD.varname + "," + HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname + METASTORE_PWD.varname + "," + HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname + "," + HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PASSWORD.varname + "," + DRUID_METADATA_DB_PASSWORD.varname // Adding the S3 credentials from Hadoop config to be hidden @@ -5735,7 +5757,10 @@ public static enum ConfVars { HIVE_ADDITIONAL_CONFIG_FILES("hive.additional.config.files", "", "The names of additional config files, such as ldap-site.xml," + - "tez-site.xml, etc in comma separated list."); + "tez-site.xml, etc in comma separated list."), + + REWRITE_POLICY("hive.rewrite.data.policy", "DEFAULT", + 
"Defines the rewrite policy, the valid values are those defined in RewritePolicy enum"); public final String varname; public final String altName; @@ -6358,11 +6383,11 @@ public String getQueryString() { } public static String getQueryString(Configuration conf) { - return getVar(conf, ConfVars.HIVEQUERYSTRING, EncoderDecoderFactory.URL_ENCODER_DECODER); + return getVar(conf, ConfVars.HIVE_QUERY_STRING, EncoderDecoderFactory.URL_ENCODER_DECODER); } public static String getQueryId(Configuration conf) { - return getVar(conf, HiveConf.ConfVars.HIVEQUERYID, ""); + return getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID, ""); } public void setQueryString(String query) { @@ -6370,7 +6395,7 @@ public void setQueryString(String query) { } public static void setQueryString(Configuration conf, String query) { - setVar(conf, ConfVars.HIVEQUERYSTRING, query, EncoderDecoderFactory.URL_ENCODER_DECODER); + setVar(conf, ConfVars.HIVE_QUERY_STRING, query, EncoderDecoderFactory.URL_ENCODER_DECODER); } public void logVars(PrintStream ps) { for (ConfVars one : ConfVars.values()) { @@ -6474,7 +6499,7 @@ private void initialize(Class cls) { // if embedded metastore is to be used as per config so far // then this is considered like the metastore server case - String msUri = this.getVar(HiveConf.ConfVars.METASTOREURIS); + String msUri = this.getVar(HiveConf.ConfVars.METASTORE_URIS); // This is hackery, but having hive-common depend on standalone-metastore is really bad // because it will pull all of the metastore code into every module. We need to check that // we aren't using the standalone metastore. If we are, we should treat it the same as a @@ -6531,18 +6556,18 @@ private void initialize(Class cls) { // if the running class was loaded directly (through eclipse) rather than through a // jar then this would be needed if (hiveJar == null) { - hiveJar = this.get(ConfVars.HIVEJAR.varname); + hiveJar = this.get(ConfVars.HIVE_JAR.varname); } if (auxJars == null) { - auxJars = StringUtils.join(FileUtils.getJarFilesByPath(this.get(ConfVars.HIVEAUXJARS.varname), this), ','); + auxJars = StringUtils.join(FileUtils.getJarFilesByPath(this.get(ConfVars.HIVE_AUX_JARS.varname), this), ','); } if (getBoolVar(ConfVars.METASTORE_SCHEMA_VERIFICATION)) { setBoolVar(ConfVars.METASTORE_AUTO_CREATE_ALL, false); } - if (getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) { + if (getBoolVar(HiveConf.ConfVars.HIVE_CONF_VALIDATION)) { List trimmed = new ArrayList(); for (Map.Entry entry : this) { String key = entry.getKey(); @@ -6625,28 +6650,28 @@ private static String[] convertVarsToRegex(String[] paramList) { */ private static final String[] SQL_STD_AUTH_SAFE_VAR_NAMES = new String[] { ConfVars.AGGR_JOIN_TRANSPOSE.varname, - ConfVars.BYTESPERREDUCER.varname, + ConfVars.BYTES_PER_REDUCER.varname, ConfVars.CLIENT_STATS_COUNTERS.varname, ConfVars.CREATE_TABLES_AS_ACID.varname, ConfVars.CREATE_TABLE_AS_EXTERNAL.varname, - ConfVars.DEFAULTPARTITIONNAME.varname, + ConfVars.DEFAULT_PARTITION_NAME.varname, ConfVars.DROP_IGNORES_NON_EXISTENT.varname, - ConfVars.HIVECOUNTERGROUP.varname, - ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT.varname, - ConfVars.HIVEENFORCEBUCKETMAPJOIN.varname, - ConfVars.HIVEENFORCESORTMERGEBUCKETMAPJOIN.varname, - ConfVars.HIVEEXPREVALUATIONCACHE.varname, - ConfVars.HIVEQUERYRESULTFILEFORMAT.varname, - ConfVars.HIVEHASHTABLELOADFACTOR.varname, - ConfVars.HIVEHASHTABLETHRESHOLD.varname, - ConfVars.HIVEIGNOREMAPJOINHINT.varname, - ConfVars.HIVELIMITMAXROWSIZE.varname, - ConfVars.HIVEMAPREDMODE.varname, - 
ConfVars.HIVEMAPSIDEAGGREGATE.varname, - ConfVars.HIVEOPTIMIZEMETADATAQUERIES.varname, - ConfVars.HIVEROWOFFSET.varname, - ConfVars.HIVEVARIABLESUBSTITUTE.varname, - ConfVars.HIVEVARIABLESUBSTITUTEDEPTH.varname, + ConfVars.HIVE_COUNTER_GROUP.varname, + ConfVars.HIVE_DEFAULT_MANAGED_FILEFORMAT.varname, + ConfVars.HIVE_ENFORCE_BUCKET_MAPJOIN.varname, + ConfVars.HIVE_ENFORCE_SORT_MERGE_BUCKET_MAPJOIN.varname, + ConfVars.HIVE_EXPR_EVALUATION_CACHE.varname, + ConfVars.HIVE_QUERY_RESULT_FILEFORMAT.varname, + ConfVars.HIVE_HASHTABLE_LOAD_FACTOR.varname, + ConfVars.HIVE_HASHTABLE_THRESHOLD.varname, + ConfVars.HIVE_IGNORE_MAPJOIN_HINT.varname, + ConfVars.HIVE_LIMIT_MAX_ROW_SIZE.varname, + ConfVars.HIVE_MAPRED_MODE.varname, + ConfVars.HIVE_MAPSIDE_AGGREGATE.varname, + ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES.varname, + ConfVars.HIVE_ROW_OFFSET.varname, + ConfVars.HIVE_VARIABLE_SUBSTITUTE.varname, + ConfVars.HIVE_VARIABLE_SUBSTITUTE_DEPTH.varname, ConfVars.HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME.varname, ConfVars.HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL.varname, ConfVars.HIVE_CHECK_CROSS_PRODUCT.varname, @@ -6687,14 +6712,14 @@ private static String[] convertVarsToRegex(String[] paramList) { ConfVars.LLAP_CLIENT_CONSISTENT_SPLITS.varname, ConfVars.LLAP_ENABLE_GRACE_JOIN_IN_LLAP.varname, ConfVars.LLAP_ALLOW_PERMANENT_FNS.varname, - ConfVars.MAXCREATEDFILES.varname, - ConfVars.MAXREDUCERS.varname, - ConfVars.NWAYJOINREORDER.varname, + ConfVars.MAX_CREATED_FILES.varname, + ConfVars.MAX_REDUCERS.varname, + ConfVars.N_WAY_JOIN_REORDER.varname, ConfVars.OUTPUT_FILE_EXTENSION.varname, ConfVars.SHOW_JOB_FAIL_DEBUG_INFO.varname, ConfVars.TASKLOG_DEBUG_TIMEOUT.varname, - ConfVars.HIVEQUERYID.varname, - ConfVars.HIVEQUERYTAG.varname, + ConfVars.HIVE_QUERY_ID.varname, + ConfVars.HIVE_QUERY_TAG.varname, }; /** @@ -6884,7 +6909,7 @@ public String getAuxJars() { */ public void setAuxJars(String auxJars) { this.auxJars = auxJars; - setVar(this, ConfVars.HIVEAUXJARS, auxJars); + setVar(this, ConfVars.HIVE_AUX_JARS, auxJars); } public URL getHiveDefaultLocation() { @@ -7066,7 +7091,7 @@ public static class StrictChecks { private static String makeMessage(String what, ConfVars setting) { return what + " are disabled for safety reasons. If you know what you are doing, please set " - + setting.varname + " to false and make sure that " + ConfVars.HIVEMAPREDMODE.varname + + + setting.varname + " to false and make sure that " + ConfVars.HIVE_MAPRED_MODE.varname + " is not set to 'strict' to proceed. Note that you may get errors or incorrect " + "results if you make a mistake while using some of the unsafe features."; } @@ -7099,7 +7124,7 @@ public static void checkOffsetWithoutOrderBy(Configuration conf) throws Semantic } private static boolean isAllowed(Configuration conf, ConfVars setting) { - String mode = HiveConf.getVar(conf, ConfVars.HIVEMAPREDMODE, (String)null); + String mode = HiveConf.getVar(conf, ConfVars.HIVE_MAPRED_MODE, (String)null); return (mode != null) ? 
!"strict".equals(mode) : !HiveConf.getBoolVar(conf, setting); } } diff --git a/common/src/java/org/apache/hadoop/hive/conf/VariableSubstitution.java b/common/src/java/org/apache/hadoop/hive/conf/VariableSubstitution.java index 973b9acae278..4ea28227628a 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/VariableSubstitution.java +++ b/common/src/java/org/apache/hadoop/hive/conf/VariableSubstitution.java @@ -59,12 +59,12 @@ public String substitute(HiveConf conf, String expr) { if (expr == null) { return expr; } - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEVARIABLESUBSTITUTE)) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_VARIABLE_SUBSTITUTE)) { l4j.debug("Substitution is on: " + expr); } else { return expr; } - int depth = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVEVARIABLESUBSTITUTEDEPTH); + int depth = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_VARIABLE_SUBSTITUTE_DEPTH); return substitute(conf, expr, depth); } } diff --git a/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java index 0968227fff01..4d94e6dae5aa 100644 --- a/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java +++ b/common/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java @@ -421,7 +421,7 @@ public enum ErrorMsg { REPLACE_UNSUPPORTED_TYPE_CONVERSION(10314, "Replacing columns with unsupported type conversion (from {0} to {1}) for column {2}. SerDe may be incompatible", true), HIVE_GROUPING_SETS_AGGR_NOMAPAGGR_MULTIGBY(10315, "Grouping sets aggregations (with rollups or cubes) are not allowed when " + - "HIVEMULTIGROUPBYSINGLEREDUCER is turned on. Set hive.multigroupby.singlereducer=false if you want to use grouping sets"), + "HIVE_MULTI_GROUPBY_SINGLE_REDUCER is turned on. Set hive.multigroupby.singlereducer=false if you want to use grouping sets"), CANNOT_RETRIEVE_TABLE_METADATA(10316, "Error while retrieving table metadata"), INVALID_AST_TREE(10318, "Internal error : Invalid AST"), ERROR_SERIALIZE_METASTORE(10319, "Error while serializing the metastore objects"), diff --git a/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java b/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java index d6fcc0bda750..cc57e9b42b00 100644 --- a/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java +++ b/common/src/java/org/apache/hadoop/hive/ql/log/PerfLogger.java @@ -119,10 +119,6 @@ public static PerfLogger getPerfLogger(HiveConf conf, boolean resetPerfLogger) { return result; } - public static void setPerfLogger(PerfLogger resetPerfLogger) { - perfLogger.set(resetPerfLogger); - } - /** * Call this function when you start to measure time spent by a piece of code. * @param callerName the logging object to be used. diff --git a/common/src/java/org/apache/hive/http/HttpServer.java b/common/src/java/org/apache/hive/http/HttpServer.java index 2d73a920d47f..83555fc5c73e 100644 --- a/common/src/java/org/apache/hive/http/HttpServer.java +++ b/common/src/java/org/apache/hive/http/HttpServer.java @@ -536,7 +536,7 @@ Connector createChannelConnector(int queueSize, Builder b) { if (!b.useSSL) { connector = new ServerConnector(webServer, http); } else { - SslContextFactory sslContextFactory = new SslContextFactory(); + SslContextFactory sslContextFactory = new SslContextFactory.Server(); sslContextFactory.setKeyStorePath(b.keyStorePath); sslContextFactory.setKeyStoreType(b.keyStoreType == null || b.keyStoreType.isEmpty() ? 
KeyStore.getDefaultType(): b.keyStoreType); diff --git a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java index bff79a98faa5..0e8e4c35fc5d 100644 --- a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java +++ b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java @@ -71,19 +71,19 @@ public void testConfProperties() throws Exception { // checkHiveConf(ConfVars.HADOOPFS.varname, "core-site.xml"); // Make sure non-null-valued ConfVar properties *do* override the Hadoop Configuration - checkHadoopConf(ConfVars.HADOOPNUMREDUCERS.varname, "1"); - checkConfVar(ConfVars.HADOOPNUMREDUCERS, "-1"); - checkHiveConf(ConfVars.HADOOPNUMREDUCERS.varname, "-1"); + checkHadoopConf(ConfVars.HADOOP_NUM_REDUCERS.varname, "1"); + checkConfVar(ConfVars.HADOOP_NUM_REDUCERS, "-1"); + checkHiveConf(ConfVars.HADOOP_NUM_REDUCERS.varname, "-1"); // Non-null ConfVar only defined in ConfVars - checkHadoopConf(ConfVars.HIVESKEWJOINKEY.varname, null); - checkConfVar(ConfVars.HIVESKEWJOINKEY, "100000"); - checkHiveConf(ConfVars.HIVESKEWJOINKEY.varname, "100000"); + checkHadoopConf(ConfVars.HIVE_SKEWJOIN_KEY.varname, null); + checkConfVar(ConfVars.HIVE_SKEWJOIN_KEY, "100000"); + checkHiveConf(ConfVars.HIVE_SKEWJOIN_KEY.varname, "100000"); // ConfVar overridden in in hive-site.xml - checkHadoopConf(ConfVars.HIVETESTMODEDUMMYSTATAGGR.varname, null); - checkConfVar(ConfVars.HIVETESTMODEDUMMYSTATAGGR, ""); - checkHiveConf(ConfVars.HIVETESTMODEDUMMYSTATAGGR.varname, "value2"); + checkHadoopConf(ConfVars.HIVE_TEST_MODE_DUMMY_STAT_AGGR.varname, null); + checkConfVar(ConfVars.HIVE_TEST_MODE_DUMMY_STAT_AGGR, ""); + checkHiveConf(ConfVars.HIVE_TEST_MODE_DUMMY_STAT_AGGR.varname, "value2"); //Property defined for hive masking algorithm checkConfVar(ConfVars.HIVE_MASKING_ALGO, "sha256"); @@ -94,7 +94,7 @@ public void testConfProperties() throws Exception { checkHiveConf("test.property1", "value1"); // Test HiveConf property variable substitution in hive-site.xml - checkHiveConf("test.var.hiveconf.property", ConfVars.DEFAULTPARTITIONNAME.getDefaultValue()); + checkHiveConf("test.var.hiveconf.property", ConfVars.DEFAULT_PARTITION_NAME.getDefaultValue()); } @Test @@ -150,7 +150,7 @@ public void testHiddenConfig() throws Exception { } ArrayList hiddenList = Lists.newArrayList( - HiveConf.ConfVars.METASTOREPWD.varname, + HiveConf.ConfVars.METASTORE_PWD.varname, HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname, HiveConf.ConfVars.HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PASSWORD.varname, "fs.s3.awsSecretAccessKey", @@ -196,7 +196,7 @@ public void testEncodingDecoding() throws UnsupportedEncodingException { HiveConf conf = new HiveConf(); String query = "select blah, '\u0001' from random_table"; conf.setQueryString(query); - Assert.assertEquals(URLEncoder.encode(query, "UTF-8"), conf.get(ConfVars.HIVEQUERYSTRING.varname)); + Assert.assertEquals(URLEncoder.encode(query, "UTF-8"), conf.get(ConfVars.HIVE_QUERY_STRING.varname)); Assert.assertEquals(query, conf.getQueryString()); } diff --git a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java index 1d0beaf58678..76b825b26f5c 100644 --- a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java +++ b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java @@ -36,7 +36,7 @@ public class TestHiveConfRestrictList { public void setUp() throws Exception { 
System.setProperty(ConfVars.HIVE_CONF_RESTRICTED_LIST.varname, - ConfVars.HIVETESTMODEPREFIX.varname); + ConfVars.HIVE_TEST_MODE_PREFIX.varname); conf = new HiveConf(); } @@ -46,7 +46,7 @@ public void setUp() throws Exception { */ @Test public void testRestriction() throws Exception { - verifyRestriction(ConfVars.HIVETESTMODEPREFIX.varname, "foo"); + verifyRestriction(ConfVars.HIVE_TEST_MODE_PREFIX.varname, "foo"); conf.verifyAndSet(ConfVars.HIVE_AM_SPLIT_GENERATION.varname, "false"); } @@ -56,7 +56,7 @@ public void testRestriction() throws Exception { */ @Test public void testMultipleRestrictions() throws Exception { - verifyRestriction(ConfVars.HIVETESTMODEPREFIX.varname, "foo"); + verifyRestriction(ConfVars.HIVE_TEST_MODE_PREFIX.varname, "foo"); verifyRestriction(ConfVars.HIVE_IN_TEST.varname, "true"); } @@ -75,25 +75,25 @@ public void testRestrictList() throws Exception { */ @Test public void testAppendRestriction() throws Exception { - String appendListStr = ConfVars.SCRATCHDIR.varname + "," + - ConfVars.LOCALSCRATCHDIR.varname + "," + - ConfVars.METASTOREURIS.varname; + String appendListStr = ConfVars.SCRATCH_DIR.varname + "," + + ConfVars.LOCAL_SCRATCH_DIR.varname + "," + + ConfVars.METASTORE_URIS.varname; conf.addToRestrictList(appendListStr); // check if the new configs are added to HIVE_CONF_RESTRICTED_LIST String newRestrictList = conf.getVar(ConfVars.HIVE_CONF_RESTRICTED_LIST); - assertTrue(newRestrictList.contains(ConfVars.SCRATCHDIR.varname)); - assertTrue(newRestrictList.contains(ConfVars.LOCALSCRATCHDIR.varname)); - assertTrue(newRestrictList.contains(ConfVars.METASTOREURIS.varname)); + assertTrue(newRestrictList.contains(ConfVars.SCRATCH_DIR.varname)); + assertTrue(newRestrictList.contains(ConfVars.LOCAL_SCRATCH_DIR.varname)); + assertTrue(newRestrictList.contains(ConfVars.METASTORE_URIS.varname)); // check if the old values are still there in HIVE_CONF_RESTRICTED_LIST - assertTrue(newRestrictList.contains(ConfVars.HIVETESTMODEPREFIX.varname)); + assertTrue(newRestrictList.contains(ConfVars.HIVE_TEST_MODE_PREFIX.varname)); // verify that the new configs are in effect - verifyRestriction(ConfVars.HIVETESTMODEPREFIX.varname, "foo"); + verifyRestriction(ConfVars.HIVE_TEST_MODE_PREFIX.varname, "foo"); verifyRestriction(ConfVars.HIVE_CONF_RESTRICTED_LIST.varname, "foo"); - verifyRestriction(ConfVars.LOCALSCRATCHDIR.varname, "foo"); - verifyRestriction(ConfVars.METASTOREURIS.varname, "foo"); + verifyRestriction(ConfVars.LOCAL_SCRATCH_DIR.varname, "foo"); + verifyRestriction(ConfVars.METASTORE_URIS.varname, "foo"); } private void verifyRestriction(String varName, String newVal) { diff --git a/common/src/test/org/apache/hadoop/hive/ql/log/PerfLoggerTest.java b/common/src/test/org/apache/hadoop/hive/ql/log/PerfLoggerTest.java index d844a983a7b4..d06bfa873568 100644 --- a/common/src/test/org/apache/hadoop/hive/ql/log/PerfLoggerTest.java +++ b/common/src/test/org/apache/hadoop/hive/ql/log/PerfLoggerTest.java @@ -55,7 +55,6 @@ public void testMT() throws InterruptedException { AtomicInteger count = new AtomicInteger(0); // getEndTimes in a loop executorService.execute(() -> { - PerfLogger.setPerfLogger(pl); try { count.incrementAndGet(); snooze(100); @@ -76,7 +75,6 @@ public void testMT() throws InterruptedException { executorService.execute(() -> { try { int cnt = count.incrementAndGet(); - PerfLogger.setPerfLogger(pl); for (int i = 0; i < 64; ++i) { pl.perfLogBegin("test", PerfLogger.COMPILE + "_ "+ cnt + "_" + i); snooze(50); diff --git a/contrib/pom.xml b/contrib/pom.xml index 
153ce15d4abf..08ed544ae6f6 100644 --- a/contrib/pom.xml +++ b/contrib/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-contrib diff --git a/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/SampleURLHook.java b/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/SampleURLHook.java index 7e09faf44b8e..4a553d5f9df9 100644 --- a/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/SampleURLHook.java +++ b/contrib/src/java/org/apache/hadoop/hive/contrib/metastore/hooks/SampleURLHook.java @@ -33,7 +33,7 @@ public class SampleURLHook implements JDOConnectionURLHook { @Override public String getJdoConnectionUrl(Configuration conf) throws Exception { if (originalUrl == null) { - originalUrl = conf.get(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, ""); + originalUrl = conf.get(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, ""); return "jdbc:derby:;databaseName=target/tmp/junit_metastore_db_blank;create=true"; } else { return originalUrl; diff --git a/data/conf/iceberg/llap/hive-site.xml b/data/conf/iceberg/llap/hive-site.xml index 57982980abf3..fb941d991c49 100644 --- a/data/conf/iceberg/llap/hive-site.xml +++ b/data/conf/iceberg/llap/hive-site.xml @@ -38,10 +38,12 @@ A base for other temporary directories. + + + - hive.tez.container.size - 128 - + hive.tez.container.size + 128 diff --git a/data/conf/iceberg/llap/tez-site.xml b/data/conf/iceberg/llap/tez-site.xml index fff6c8756203..b96019bb37fa 100644 --- a/data/conf/iceberg/llap/tez-site.xml +++ b/data/conf/iceberg/llap/tez-site.xml @@ -11,10 +11,6 @@ tez.runtime.io.sort.mb 24 - - hive.tez.container.size - 512 - tez.counters.max 1024 diff --git a/data/conf/iceberg/tez/hive-site.xml b/data/conf/iceberg/tez/hive-site.xml index 0dc31fed5edd..272859dd915d 100644 --- a/data/conf/iceberg/tez/hive-site.xml +++ b/data/conf/iceberg/tez/hive-site.xml @@ -40,8 +40,7 @@ hive.tez.container.size - 128 - + 512 diff --git a/data/conf/iceberg/tez/tez-site.xml b/data/conf/iceberg/tez/tez-site.xml index fff6c8756203..7b26d6aad3e8 100644 --- a/data/conf/iceberg/tez/tez-site.xml +++ b/data/conf/iceberg/tez/tez-site.xml @@ -5,16 +5,12 @@ tez.am.resource.memory.mb - 256 + 512 tez.runtime.io.sort.mb 24 - - hive.tez.container.size - 512 - tez.counters.max 1024 diff --git a/data/conf/llap/hive-site.xml b/data/conf/llap/hive-site.xml index 27609b7a2be3..fe69d81de7e3 100644 --- a/data/conf/llap/hive-site.xml +++ b/data/conf/llap/hive-site.xml @@ -44,10 +44,12 @@ A base for other temporary directories. 
+ + + hive.tez.container.size 128 - diff --git a/data/conf/llap/tez-site.xml b/data/conf/llap/tez-site.xml index 6118e6edf8eb..cc4177e3cbd2 100644 --- a/data/conf/llap/tez-site.xml +++ b/data/conf/llap/tez-site.xml @@ -9,10 +9,6 @@ tez.am.resource.memory.mb 128 - - tez.task.resource.memory.mb - 128 - tez.runtime.io.sort.mb 24 diff --git a/data/conf/tez/tez-site.xml b/data/conf/tez/tez-site.xml index 88adb6a57e80..ff3b468fca33 100644 --- a/data/conf/tez/tez-site.xml +++ b/data/conf/tez/tez-site.xml @@ -3,10 +3,6 @@ tez.am.resource.memory.mb 512 - - tez.task.resource.memory.mb - 128 - tez.runtime.io.sort.mb 24 diff --git a/data/files/control_characters.txt b/data/files/control_characters.txt new file mode 100644 index 000000000000..4e3fc6c45351 --- /dev/null +++ b/data/files/control_characters.txt @@ -0,0 +1 @@ +abcdeÿfghi \ No newline at end of file diff --git a/data/files/datasets/temp_udaf/load.hive.sql b/data/files/datasets/temp_udaf/load.hive.sql new file mode 100644 index 000000000000..f0e59b3feaee --- /dev/null +++ b/data/files/datasets/temp_udaf/load.hive.sql @@ -0,0 +1 @@ +create function ptf_register_use_func as 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFRowNumber'; diff --git a/data/files/datasets/temp_udf/load.hive.sql b/data/files/datasets/temp_udf/load.hive.sql new file mode 100644 index 000000000000..b8a21d37a23a --- /dev/null +++ b/data/files/datasets/temp_udf/load.hive.sql @@ -0,0 +1 @@ +CREATE FUNCTION compute_stats AS 'org.apache.hadoop.hive.ql.udf.generic.GenericUDAFComputeStats'; \ No newline at end of file diff --git a/druid-handler/pom.xml b/druid-handler/pom.xml index 8e28088e9d2c..0c0e2f96e7ad 100644 --- a/druid-handler/pom.xml +++ b/druid-handler/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-druid-handler diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java index cd35ebd81f71..656fa40c03fa 100644 --- a/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java +++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/DruidStorageHandler.java @@ -819,7 +819,7 @@ private List fetchSegmentsMetadata(Path segmentDescriptorDir) throw private String getUniqueId() { if (uniqueId == null) { uniqueId = - Preconditions.checkNotNull(Strings.emptyToNull(HiveConf.getVar(getConf(), HiveConf.ConfVars.HIVEQUERYID)), + Preconditions.checkNotNull(Strings.emptyToNull(HiveConf.getVar(getConf(), HiveConf.ConfVars.HIVE_QUERY_ID)), "Hive query id is null"); } return uniqueId; diff --git a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java index fc50e0b9216d..2a2be067125f 100644 --- a/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java +++ b/druid-handler/src/java/org/apache/hadoop/hive/druid/io/DruidQueryBasedInputFormat.java @@ -101,7 +101,7 @@ public static DruidQueryRecordReader getDruidQueryReader(String druidQueryType) protected HiveDruidSplit[] getInputSplits(Configuration conf) throws IOException { String address = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DRUID_BROKER_DEFAULT_ADDRESS); - String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID); if (StringUtils.isEmpty(address)) { throw new IOException("Druid broker address not specified in 
configuration"); } diff --git a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java index 7d94f1afc996..5040831b9a94 100644 --- a/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java +++ b/druid-handler/src/test/org/apache/hadoop/hive/druid/TestDruidStorageHandler.java @@ -111,7 +111,7 @@ private DataSegment createSegment(String location, Interval interval, String ver Mockito.when(tableMock.getDbName()).thenReturn(DB_NAME); Mockito.when(tableMock.getTableName()).thenReturn(TABLE_NAME); config = new Configuration(); - config.set(String.valueOf(HiveConf.ConfVars.HIVEQUERYID), "hive-" + UUID.randomUUID().toString()); + config.set(String.valueOf(HiveConf.ConfVars.HIVE_QUERY_ID), "hive-" + UUID.randomUUID().toString()); config.set(String.valueOf(HiveConf.ConfVars.DRUID_WORKING_DIR), tableWorkingPath); config.set(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY), new Path(tableWorkingPath, "finalSegmentDir").toString()); diff --git a/hbase-handler/pom.xml b/hbase-handler/pom.xml index e0a4c8de19a0..756dd45203f2 100644 --- a/hbase-handler/pom.xml +++ b/hbase-handler/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-hbase-handler diff --git a/hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseQueries.java b/hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseQueries.java index da69f0887f77..16bdd8cf3d27 100644 --- a/hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseQueries.java +++ b/hbase-handler/src/test/org/apache/hadoop/hive/hbase/TestHBaseQueries.java @@ -105,7 +105,7 @@ public void testRollbackDoesNotDeleteOriginTableWhenCTLTFails() throws CommandPr conf.setBoolVar(HiveConf.ConfVars.HIVE_STRICT_MANAGED_TABLES, true); conf.setBoolVar(HiveConf.ConfVars.CREATE_TABLES_AS_ACID, true); conf.setBoolVar(HiveConf.ConfVars.HIVE_CREATE_TABLES_AS_INSERT_ONLY, true); - conf.setVar(HiveConf.ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT, "ORC"); + conf.setVar(HiveConf.ConfVars.HIVE_DEFAULT_MANAGED_FILEFORMAT, "ORC"); driver = DriverFactory.newDriver(conf); diff --git a/hcatalog/core/pom.xml b/hcatalog/core/pom.xml index b0663ea530ab..b7ae48cc87a9 100644 --- a/hcatalog/core/pom.xml +++ b/hcatalog/core/pom.xml @@ -22,7 +22,7 @@ org.apache.hive.hcatalog hive-hcatalog - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-hcatalog-core diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java index 1e319b4e7dea..c42e9936f47a 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HCatConstants.java @@ -81,7 +81,7 @@ private HCatConstants() { // restrict instantiation public static final String HCAT_TABLE_SCHEMA = "hcat.table.schema"; - public static final String HCAT_METASTORE_URI = HiveConf.ConfVars.METASTOREURIS.varname; + public static final String HCAT_METASTORE_URI = HiveConf.ConfVars.METASTORE_URIS.varname; public static final String HCAT_PERMS = "hcat.perms"; diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java index d786e3c4822e..11e53d94a48b 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java +++ 
b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/common/HiveClientCache.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.annotation.NoReconnect; -import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hive.common.util.ShutdownHookManager; @@ -327,7 +326,7 @@ static class HiveClientCacheKey { final private int threadId; private HiveClientCacheKey(HiveConf hiveConf, final int threadId) throws IOException, LoginException { - this.metaStoreURIs = hiveConf.getVar(HiveConf.ConfVars.METASTOREURIS); + this.metaStoreURIs = hiveConf.getVar(HiveConf.ConfVars.METASTORE_URIS); ugi = Utils.getUGI(); this.hiveConf = hiveConf; this.threadId = threadId; diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DynamicPartitionFileRecordWriterContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DynamicPartitionFileRecordWriterContainer.java index cda8770a2c98..ceafabaa6eb0 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DynamicPartitionFileRecordWriterContainer.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/DynamicPartitionFileRecordWriterContainer.java @@ -88,7 +88,7 @@ public DynamicPartitionFileRecordWriterContainer( this.dynamicContexts = new HashMap(); this.dynamicObjectInspectors = new HashMap(); this.dynamicOutputJobInfo = new HashMap(); - this.HIVE_DEFAULT_PARTITION_VALUE = HiveConf.getVar(context.getConfiguration(), HiveConf.ConfVars.DEFAULTPARTITIONNAME); + this.HIVE_DEFAULT_PARTITION_VALUE = HiveConf.getVar(context.getConfiguration(), HiveConf.ConfVars.DEFAULT_PARTITION_NAME); } @Override @@ -149,7 +149,7 @@ protected LocalFileWriter getLocalFileWriter(HCatRecord value) throws IOExceptio throw new HCatException(ErrorType.ERROR_TOO_MANY_DYNAMIC_PTNS, "Number of dynamic partitions being created " + "exceeds configured max allowable partitions[" + maxDynamicPartitions - + "], increase parameter [" + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname + + "], increase parameter [" + HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS.varname + "] if needed."); } diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java index 476c60e53af5..e585b5b08c5c 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/FileOutputCommitterContainer.java @@ -488,7 +488,7 @@ private void updateTableSchema(IMetaStoreClient client, Table table, } /** - * Move all of the files from the temp directory to the final location + * Move task output from the temp directory to the final location * @param srcf the file to move * @param srcDir the source directory * @param destDir the target directory @@ -497,7 +497,7 @@ private void updateTableSchema(IMetaStoreClient client, Table table, */ private void moveTaskOutputs(final Configuration conf, Path srcf, Path srcDir, Path destDir, boolean immutable) throws IOException { - if(LOG.isDebugEnabled()) { + if (LOG.isDebugEnabled()) { LOG.debug("moveTaskOutputs " + srcf + " from: " + srcDir + " to: " + destDir + " immutable: " + immutable); } @@ -516,8 +516,8 @@ private void 
moveTaskOutputs(final Configuration conf, Path srcf, Path srcDir, LinkedList<Pair<Path, Path>> moves = new LinkedList<>(); if (customDynamicLocationUsed) { - if (immutable && destFs.exists(destDir) && - !org.apache.hadoop.hive.metastore.utils.FileUtils.isDirEmpty(destFs, destDir)) { + if (immutable && destFs.exists(destDir) + && !org.apache.hadoop.hive.metastore.utils.FileUtils.isDirEmpty(destFs, destDir)) { throw new HCatException(ErrorType.ERROR_DUPLICATE_PARTITION, "Data already exists in " + destDir + ", duplicate publish not possible."); @@ -536,19 +536,18 @@ private void moveTaskOutputs(final Configuration conf, Path srcf, Path srcDir, FileStatus srcStatus = srcQ.remove(); Path srcF = srcStatus.getPath(); final Path finalOutputPath = getFinalPath(destFs, srcF, srcDir, destDir, immutable); - if (immutable && destFs.exists(finalOutputPath) && - !org.apache.hadoop.hive.metastore.utils.FileUtils.isDirEmpty(destFs, finalOutputPath)) { - throw new HCatException(ErrorType.ERROR_DUPLICATE_PARTITION, - "Data already exists in " + finalOutputPath - + ", duplicate publish not possible."); - } - if (srcStatus.isDirectory()) { + if (immutable && destFs.exists(finalOutputPath) + && !org.apache.hadoop.hive.metastore.utils.FileUtils.isDirEmpty(destFs, finalOutputPath)) { + if (partitionsDiscoveredByPath.containsKey(srcF.toString())) { + throw new HCatException(ErrorType.ERROR_DUPLICATE_PARTITION, + "Data already exists in " + finalOutputPath + ", duplicate publish not possible."); + } + // parent directory may exist for multi-partitions, check lower level partitions + Collections.addAll(srcQ, srcFs.listStatus(srcF, HIDDEN_FILES_PATH_FILTER)); + } else if (srcStatus.isDirectory()) { if (canRename && dynamicPartitioningUsed) { // If it is partition, move the partition directory instead of each file. - // If custom dynamic location provided, need to rename to final output path - final Path parentDir = finalOutputPath.getParent(); - Path dstPath = !customDynamicLocationUsed ? parentDir : finalOutputPath; - moves.add(Pair.of(srcF, dstPath)); + moves.add(Pair.of(srcF, finalOutputPath)); } else { Collections.addAll(srcQ, srcFs.listStatus(srcF, HIDDEN_FILES_PATH_FILTER)); } @@ -558,50 +557,69 @@ private void moveTaskOutputs(final Configuration conf, Path srcf, Path srcDir, } } - if (moves.isEmpty()) { + bulkMoveFiles(conf, srcFs, destFs, moves); + } + + /** + * Bulk move files from source to destination. + * @param srcFs the source filesystem where the source files are + * @param destFs the destination filesystem where the destination files are + * @param pairs list of pairs of <source_path, destination_path>, move source_path to destination_path + * @throws java.io.IOException + */ + private void bulkMoveFiles(final Configuration conf, final FileSystem srcFs, final FileSystem destFs, + final List<Pair<Path, Path>> pairs) throws IOException { + if (pairs.isEmpty()) { return; } - + final boolean canRename = srcFs.getUri().equals(destFs.getUri()); final List<Future<Pair<Path, Path>>> futures = new LinkedList<>(); final ExecutorService pool = conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25) > 0 ?
- Executors.newFixedThreadPool(conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25), - new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Move-Thread-%d").build()) : null; + final int moveThreadsCount = conf.getInt(ConfVars.HIVE_MOVE_FILES_THREAD_COUNT.varname, 25); - for (final Pair pair: moves){ + if (moveThreadsCount <= 0) { + for (final Pair pair: pairs) { + Path srcP = pair.getLeft(); + Path dstP = pair.getRight(); + if (!moveFile(srcFs, srcP, destFs, dstP, conf, canRename)) { + throw new HCatException(ErrorType.ERROR_MOVE_FAILED, + "Unable to move from " + srcP + " to " + dstP); + } + } + return; + } + + final ExecutorService pool = Executors.newFixedThreadPool(moveThreadsCount, + new ThreadFactoryBuilder().setDaemon(true).setNameFormat("Move-Thread-%d").build()); + + for (final Pair pair: pairs) { Path srcP = pair.getLeft(); Path dstP = pair.getRight(); - final String msg = "Unable to move source " + srcP + " to destination " + dstP; - if (null==pool) { - moveFile(srcFs, srcP, destFs, dstP, conf, canRename); - } else { - futures.add(pool.submit(new Callable>() { - @Override - public Pair call() throws IOException { - if (moveFile(srcFs, srcP, destFs, dstP, conf, canRename)) { - return pair; - } else { - throw new HCatException(ErrorType.ERROR_MOVE_FAILED, msg); - } + futures.add(pool.submit(new Callable>() { + @Override + public Pair call() throws IOException { + if (moveFile(srcFs, srcP, destFs, dstP, conf, canRename)) { + return pair; + } else { + throw new HCatException(ErrorType.ERROR_MOVE_FAILED, + "Unable to move from " + srcP + " to " + dstP); } - })); - } - } - if (null != pool) { - pool.shutdown(); - for (Future> future : futures) { - try { - Pair pair = future.get(); - LOG.debug("Moved src: {}, to dest: {}", pair.getLeft().toString(), pair.getRight().toString()); - } catch (Exception e) { - LOG.error("Failed to move {}", e.getMessage()); - pool.shutdownNow(); - throw new HCatException(ErrorType.ERROR_MOVE_FAILED, e.getMessage()); } + })); + } + pool.shutdown(); + for (Future> future : futures) { + try { + future.get(); + } catch (Exception e) { + pool.shutdownNow(); + throw new HCatException(ErrorType.ERROR_MOVE_FAILED, e.getMessage()); } } } - private boolean moveFile(FileSystem srcFs, Path srcf, FileSystem destFs, Path destf, Configuration conf, boolean canRename) throws IOException { + private boolean moveFile(final FileSystem srcFs, final Path srcf, final FileSystem destFs, final Path destf, + final Configuration conf, final boolean canRename) throws IOException { + LOG.debug("Moving src: {}, to dest: {}", srcf, destf); boolean moved; if (canRename) { destFs.mkdirs(destf.getParent()); @@ -716,7 +734,7 @@ private void discoverPartitions(JobContext context) throws IOException { + "exceeds configured max allowable partitions[" + maxDynamicPartitions + "], increase parameter [" - + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname + + HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS.varname + "] if needed."); } diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java index 7167bd913e4d..9aaf67e40d84 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatOutputFormat.java @@ -280,14 +280,14 @@ private static int getMaxDynamicPartitions(HiveConf hConf) { if (HCatConstants.HCAT_IS_DYNAMIC_MAX_PTN_CHECK_ENABLED) { 
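Aside on the FileOutputCommitterContainer hunk above: the rewrite folds the old inline loop into bulkMoveFiles, which moves files serially when the configured move-thread count is zero or negative and otherwise submits every move to a fixed daemon pool, failing fast and shutting the pool down on the first error. A self-contained sketch of that pattern follows; FileMover and the String paths are illustrative stand-ins, not HCatalog APIs.

```java
import java.io.IOException;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.commons.lang3.tuple.Pair;

public class BulkMoveSketch {
  /** Stand-in for the real moveFile(); returns false when the move did not happen. */
  interface FileMover {
    boolean move(String src, String dst) throws IOException;
  }

  static void bulkMove(List<Pair<String, String>> pairs, FileMover mover, int moveThreads)
      throws IOException, InterruptedException {
    if (pairs.isEmpty()) {
      return;
    }
    if (moveThreads <= 0) {
      // Serial fallback, mirroring the moveThreadsCount <= 0 branch of the patch.
      for (Pair<String, String> p : pairs) {
        if (!mover.move(p.getLeft(), p.getRight())) {
          throw new IOException("Unable to move from " + p.getLeft() + " to " + p.getRight());
        }
      }
      return;
    }
    ExecutorService pool = Executors.newFixedThreadPool(moveThreads);
    List<Future<Pair<String, String>>> futures = new LinkedList<>();
    for (Pair<String, String> p : pairs) {
      futures.add(pool.submit(() -> {
        if (!mover.move(p.getLeft(), p.getRight())) {
          throw new IOException("Unable to move from " + p.getLeft() + " to " + p.getRight());
        }
        return p;
      }));
    }
    pool.shutdown();
    for (Future<Pair<String, String>> f : futures) {
      try {
        f.get(); // surfaces the first failed move
      } catch (ExecutionException e) {
        pool.shutdownNow(); // cancel the remaining moves, as the patch does
        throw new IOException(e.getCause());
      }
    }
  }
}
```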
maxDynamicPartitions = hConf.getIntVar( - HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS); + HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS); } return maxDynamicPartitions; } private static boolean getHarRequested(HiveConf hConf) { - return hConf.getBoolVar(HiveConf.ConfVars.HIVEARCHIVEENABLED); + return hConf.getBoolVar(HiveConf.ConfVars.HIVE_ARCHIVE_ENABLED); } } diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatRecordReader.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatRecordReader.java index 29fc154686c6..4550a794cbb7 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatRecordReader.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/mapreduce/HCatRecordReader.java @@ -226,7 +226,7 @@ public void close() throws IOException { /** * Tracks number of of errors in input and throws a Runtime exception * if the rate of errors crosses a limit. - *
+ *
* The intention is to skip over very rare file corruption or incorrect * input, but catch programmer errors (incorrect format, or incorrect * deserializers etc). diff --git a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceInputFormat.java b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceInputFormat.java index 084185a7ed10..7cbc29d1ec39 100644 --- a/hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceInputFormat.java +++ b/hcatalog/core/src/main/java/org/apache/hive/hcatalog/rcfile/RCFileMapReduceInputFormat.java @@ -45,7 +45,7 @@ public RecordReader createRecordReader(Inpu @Override public List getSplits(JobContext job) throws IOException { HiveConf.setLongVar(job.getConfiguration(), - HiveConf.ConfVars.MAPREDMINSPLITSIZE, SequenceFile.SYNC_INTERVAL); + HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, SequenceFile.SYNC_INTERVAL); return super.getSplits(job); } } diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java index 2b57d8d8ae69..749409a03620 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestPermsGrp.java @@ -100,15 +100,15 @@ public void setUp() throws Exception { System.setSecurityManager(new NoExitSecurityManager()); Policy.setPolicy(new DerbyPolicy()); - hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3); + hcatConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); + hcatConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_FAILURE_RETRIES, 3); hcatConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 60, TimeUnit.SECONDS); hcatConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - hcatConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + hcatConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.WAREHOUSE)); - hcatConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, + hcatConf.set(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.CONNECT_URL_KEY)); - hcatConf.set(HiveConf.ConfVars.METASTOREURIS.varname, + hcatConf.set(HiveConf.ConfVars.METASTORE_URIS.varname, MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.THRIFT_URIS)); clientWH = new Warehouse(hcatConf); msc = new HiveMetaStoreClient(hcatConf); @@ -200,18 +200,18 @@ private int callHCatCli(String[] args) throws Exception { argsList.add("-Dhive.support.concurrency=false"); argsList .add("-Dhive.security.authorization.manager=org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); - argsList.add("-D" + HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES.varname + "=3"); - argsList.add("-D" + HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES.varname + "=3"); + argsList.add("-D" + HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES.varname + "=3"); + argsList.add("-D" + HiveConf.ConfVars.METASTORE_THRIFT_FAILURE_RETRIES.varname + "=3"); argsList.add("-D" + HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT.varname + "=60"); - argsList.add("-D" + HiveConf.ConfVars.METASTOREWAREHOUSE.varname + "=" + argsList.add("-D" + HiveConf.ConfVars.METASTORE_WAREHOUSE.varname + "=" + MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.WAREHOUSE)); - 
argsList.add("-D" + HiveConf.ConfVars.METASTORECONNECTURLKEY.varname + "=" + argsList.add("-D" + HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname + "=" + MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.CONNECT_URL_KEY)); - argsList.add("-D" + HiveConf.ConfVars.METASTOREURIS.varname + "=" + argsList.add("-D" + HiveConf.ConfVars.METASTORE_URIS.varname + "=" + MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.THRIFT_URIS)); argsList.add("-D" + HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname + "=" + HCatSemanticAnalyzer.class.getName()); - argsList.add("-D" + HiveConf.ConfVars.PREEXECHOOKS.varname + "="); - argsList.add("-D" + HiveConf.ConfVars.POSTEXECHOOKS.varname + "="); + argsList.add("-D" + HiveConf.ConfVars.PRE_EXEC_HOOKS.varname + "="); + argsList.add("-D" + HiveConf.ConfVars.POST_EXEC_HOOKS.varname + "="); argsList.add("-D" + HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname + "=false"); argsList.add("-D" + "test.warehouse.dir=" + System.getProperty("test.warehouse.dir")); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java index b5f29f5e40fd..546317ab00ec 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java @@ -65,7 +65,7 @@ public void setUpHCatDriver() throws IOException { hcatConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); - hcatConf.set(HiveConf.ConfVars.HIVEDEFAULTRCFILESERDE.varname, + hcatConf.set(HiveConf.ConfVars.HIVE_DEFAULT_RCFILE_SERDE.varname, "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"); hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java index 63715432b721..73558f92cd71 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestUseDatabase.java @@ -44,8 +44,8 @@ public void setUp() throws Exception { HiveConf hcatConf = new HiveConf(this.getClass()); hcatConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); - hcatConf.set(ConfVars.PREEXECHOOKS.varname, ""); - hcatConf.set(ConfVars.POSTEXECHOOKS.varname, ""); + hcatConf.set(ConfVars.PRE_EXEC_HOOKS.varname, ""); + hcatConf.set(ConfVars.POST_EXEC_HOOKS.varname, ""); hcatConf.set(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hcatConf.set(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java index fe1d8afdc8bd..58772179a82a 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHiveClientCache.java @@ -83,7 +83,7 @@ public void testCacheHit() throws IOException, MetaException, LoginException { client.close(); // close shouldn't matter // Setting a non important configuration should return the same client 
only - hiveConf.setIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS, 10); + hiveConf.setIntVar(HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS, 10); HiveClientCache.ICacheableMetaStoreClient client2 = (HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf); assertNotNull(client2); assertSame(client, client2); @@ -98,7 +98,7 @@ public void testCacheMiss() throws IOException, MetaException, LoginException { assertNotNull(client); // Set different uri as it is one of the criteria deciding whether to return the same client or not - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, " "); // URIs are checked for string equivalence, even spaces make them different + hiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, " "); // URIs are checked for string equivalence, even spaces make them different IMetaStoreClient client2 = cache.get(hiveConf); assertNotNull(client2); assertNotSame(client, client2); @@ -157,7 +157,7 @@ public IMetaStoreClient call() throws IOException, MetaException, LoginException public void testCloseAllClients() throws IOException, MetaException, LoginException { final HiveClientCache cache = new HiveClientCache(1000); HiveClientCache.ICacheableMetaStoreClient client1 = (HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf); - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, " "); // URIs are checked for string equivalence, even spaces make them different + hiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, " "); // URIs are checked for string equivalence, even spaces make them different HiveClientCache.ICacheableMetaStoreClient client2 = (HiveClientCache.ICacheableMetaStoreClient) cache.get(hiveConf); cache.closeAllClientsQuietly(); assertTrue(client1.isClosed()); @@ -227,18 +227,18 @@ public LocalMetaServer() { securityManager = System.getSecurityManager(); System.setSecurityManager(new NoExitSecurityManager()); hiveConf = new HiveConf(TestHiveClientCache.class); - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + hiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + MS_PORT); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3); + hiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); + hiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_FAILURE_RETRIES, 3); hiveConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, " "); } public void start() throws InterruptedException { diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/data/HCatDataCheckUtil.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/data/HCatDataCheckUtil.java index cf5ef56b5201..08a941c5e1f1 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/data/HCatDataCheckUtil.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/data/HCatDataCheckUtil.java @@ -46,8 +46,8 @@ public 
static IDriver instantiateDriver(MiniCluster cluster) { for (Entry e : cluster.getProperties().entrySet()) { hiveConf.set(e.getKey().toString(), e.getValue().toString()); } - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); LOG.debug("Hive conf : {}", hiveConf.getAllProperties()); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatBaseTest.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatBaseTest.java index a304e49ae879..25cb75ec41d2 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatBaseTest.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/HCatBaseTest.java @@ -89,12 +89,12 @@ protected void setUpHiveConf() { + File.separator + "mapred" + File.separator + "staging"); hiveConf.set("mapred.temp.dir", workDir + File.separator + this.getClass().getSimpleName() + File.separator + "mapred" + File.separator + "temp"); - hiveConf.setVar(HiveConf.ConfVars.PREEXECHOOKS, ""); - hiveConf.setVar(HiveConf.ConfVars.POSTEXECHOOKS, ""); + hiveConf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, ""); + hiveConf.setVar(HiveConf.ConfVars.POST_EXEC_HOOKS, ""); hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - hiveConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, TEST_WAREHOUSE_DIR); - hiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, true); + hiveConf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, TEST_WAREHOUSE_DIR); + hiveConf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES, true); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java index 424e428be545..9ee887b933bd 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatDynamicPartitioned.java @@ -38,7 +38,6 @@ import org.apache.hive.hcatalog.data.schema.HCatFieldSchema; import org.apache.hive.hcatalog.data.schema.HCatSchemaUtils; -import org.junit.BeforeClass; import org.junit.Test; import org.slf4j.Logger; @@ -53,13 +52,13 @@ public class TestHCatDynamicPartitioned extends HCatMapReduceTest { private static List dataColumns; private static final Logger LOG = LoggerFactory.getLogger(TestHCatDynamicPartitioned.class); protected static final int NUM_RECORDS = 20; - protected static final int NUM_PARTITIONS = 5; + protected static final int NUM_TOP_PARTITIONS = 5; public TestHCatDynamicPartitioned(String formatName, String serdeClass, String inputFormatClass, String outputFormatClass) throws Exception { super(formatName, serdeClass, inputFormatClass, outputFormatClass); tableName = "testHCatDynamicPartitionedTable_" + formatName; - generateWriteRecords(NUM_RECORDS, NUM_PARTITIONS, 0); + generateWriteRecords(NUM_RECORDS, NUM_TOP_PARTITIONS, 0); generateDataColumns(); } @@ -68,6 
+67,8 @@ protected static void generateDataColumns() throws HCatException { dataColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c1", serdeConstants.INT_TYPE_NAME, ""))); dataColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("c2", serdeConstants.STRING_TYPE_NAME, ""))); dataColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("p1", serdeConstants.STRING_TYPE_NAME, ""))); + dataColumns.add(HCatSchemaUtils.getHCatFieldSchema(new FieldSchema("p2", serdeConstants.STRING_TYPE_NAME, ""))); + } protected static void generateWriteRecords(int max, int mod, int offset) { @@ -79,6 +80,7 @@ protected static void generateWriteRecords(int max, int mod, int offset) { objList.add(i); objList.add("strvalue" + i); objList.add(String.valueOf((i % mod) + offset)); + objList.add(String.valueOf((i / (max/2)) + offset)); writeRecords.add(new DefaultHCatRecord(objList)); } } @@ -87,6 +89,7 @@ protected static void generateWriteRecords(int max, int mod, int offset) { protected List getPartitionKeys() { List fields = new ArrayList(); fields.add(new FieldSchema("p1", serdeConstants.STRING_TYPE_NAME, "")); + fields.add(new FieldSchema("p2", serdeConstants.STRING_TYPE_NAME, "")); return fields; } @@ -118,8 +121,11 @@ public void testHCatDynamicPartitionedTableMultipleTask() throws Exception { protected void runHCatDynamicPartitionedTable(boolean asSingleMapTask, String customDynamicPathPattern) throws Exception { - generateWriteRecords(NUM_RECORDS, NUM_PARTITIONS, 0); - runMRCreate(null, dataColumns, writeRecords, NUM_RECORDS, true, asSingleMapTask, customDynamicPathPattern); + generateWriteRecords(NUM_RECORDS, NUM_TOP_PARTITIONS, 0); + runMRCreate(null, dataColumns, writeRecords.subList(0,NUM_RECORDS/2), NUM_RECORDS/2, + true, asSingleMapTask, customDynamicPathPattern); + runMRCreate(null, dataColumns, writeRecords.subList(NUM_RECORDS/2,NUM_RECORDS), NUM_RECORDS/2, + true, asSingleMapTask, customDynamicPathPattern); runMRRead(NUM_RECORDS); @@ -141,7 +147,7 @@ protected void runHCatDynamicPartitionedTable(boolean asSingleMapTask, //Test for duplicate publish IOException exc = null; try { - generateWriteRecords(NUM_RECORDS, NUM_PARTITIONS, 0); + generateWriteRecords(NUM_RECORDS, NUM_TOP_PARTITIONS, 0); Job job = runMRCreate(null, dataColumns, writeRecords, NUM_RECORDS, false, true, customDynamicPathPattern); @@ -168,7 +174,7 @@ protected void runHCatDynamicPartitionedTable(boolean asSingleMapTask, driver.run(query); res = new ArrayList(); driver.getResults(res); - assertEquals(NUM_PARTITIONS, res.size()); + assertEquals(NUM_TOP_PARTITIONS*2, res.size()); query = "select * from " + tableName; driver.run(query); @@ -196,7 +202,7 @@ protected void runHCatDynamicPartitionedTable(boolean asSingleMapTask, public void _testHCatDynamicPartitionMaxPartitions() throws Exception { HiveConf hc = new HiveConf(this.getClass()); - int maxParts = hiveConf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS); + int maxParts = hiveConf.getIntVar(HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS); LOG.info("Max partitions allowed = {}", maxParts); IOException exc = null; diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalDynamicPartitioned.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalDynamicPartitioned.java index 18fcfdbdd2a8..f142f3d488fa 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalDynamicPartitioned.java +++ 
b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatExternalDynamicPartitioned.java @@ -28,7 +28,7 @@ public TestHCatExternalDynamicPartitioned(String formatName, String serdeClass, throws Exception { super(formatName, serdeClass, inputFormatClass, outputFormatClass); tableName = "testHCatExternalDynamicPartitionedTable_" + formatName; - generateWriteRecords(NUM_RECORDS, NUM_PARTITIONS, 0); + generateWriteRecords(NUM_RECORDS, NUM_TOP_PARTITIONS, 0); generateDataColumns(); } @@ -43,7 +43,7 @@ protected Boolean isTableExternal() { */ @Test public void testHCatExternalDynamicCustomLocation() throws Exception { - runHCatDynamicPartitionedTable(true, "mapred/externalDynamicOutput/${p1}"); + runHCatDynamicPartitionedTable(true, "mapred/externalDynamicOutput/${p1}/${p2}"); } } diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java index e601992fc40b..d87158b23fae 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatMultiOutputFormat.java @@ -156,7 +156,7 @@ public static void setup() throws Exception { warehousedir = new Path(System.getProperty("test.warehouse.dir")); HiveConf metastoreConf = new HiveConf(); - metastoreConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehousedir.toString()); + metastoreConf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, warehousedir.toString()); // Run hive metastore server MetaStoreTestUtils.startMetaStoreWithRetry(metastoreConf); @@ -183,23 +183,23 @@ public static void setup() throws Exception { private static void initializeSetup(HiveConf metastoreConf) throws Exception { hiveConf = new HiveConf(metastoreConf, TestHCatMultiOutputFormat.class); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3); + hiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); + hiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_FAILURE_RETRIES, 3); hiveConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + System.setProperty(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.WAREHOUSE)); - System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.CONNECT_URL_KEY)); - System.setProperty(HiveConf.ConfVars.METASTOREURIS.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_URIS.varname, MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.THRIFT_URIS)); - 
hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, warehousedir.toString()); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, warehousedir.toString()); try { hmsc = new HiveMetaStoreClient(hiveConf); initalizeTables(); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java index 22a0d3f7d134..2b28f4f0d752 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestHCatPartitionPublish.java @@ -44,7 +44,6 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.io.RCFileInputFormat; import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; import org.apache.hadoop.hive.serde.serdeConstants; @@ -120,23 +119,23 @@ public static void setup() throws Exception { System.setSecurityManager(new NoExitSecurityManager()); Policy.setPolicy(new DerbyPolicy()); - hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, 3); + hcatConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); + hcatConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_FAILURE_RETRIES, 3); hcatConf.setTimeVar(HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, 120, TimeUnit.SECONDS); hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); - hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hcatConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hcatConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); msc = new HiveMetaStoreClient(hcatConf); - System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + System.setProperty(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.WAREHOUSE)); - System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.CONNECT_URL_KEY)); - System.setProperty(HiveConf.ConfVars.METASTOREURIS.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_URIS.varname, MetastoreConf.getVar(hcatConf, MetastoreConf.ConfVars.THRIFT_URIS)); } diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java index c955aa502126..a787f409eb3f 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java @@ -58,10 +58,10 @@ public class TestPassProperties { public void Initialize() throws Exception { hiveConf = new 
HiveConf(this.getClass()); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, TEST_WAREHOUSE_DIR); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java index 67193d4e50f1..759a73b49988 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/rcfile/TestRCFileMapReduceInputFormat.java @@ -38,7 +38,6 @@ import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.io.LongWritable; -import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.Job; @@ -233,7 +232,7 @@ private void writeThenReadByRecordReader(int intervalRecordCount, jonconf.set("mapred.input.dir", testDir.toString()); JobContext context = new Job(jonconf); HiveConf.setLongVar(context.getConfiguration(), - HiveConf.ConfVars.MAPREDMAXSPLITSIZE, maxSplitSize); + HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, maxSplitSize); List splits = inputFormat.getSplits(context); assertEquals("splits length should be " + splitNumber, splitNumber, splits.size()); int readCount = 0; diff --git a/hcatalog/hcatalog-pig-adapter/pom.xml b/hcatalog/hcatalog-pig-adapter/pom.xml index b1109d461c07..472636d234ed 100644 --- a/hcatalog/hcatalog-pig-adapter/pom.xml +++ b/hcatalog/hcatalog-pig-adapter/pom.xml @@ -22,7 +22,7 @@ org.apache.hive.hcatalog hive-hcatalog - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-hcatalog-pig-adapter @@ -152,7 +152,7 @@ org.apache.hive hive-standalone-metastore-server - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT tests test diff --git a/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java b/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java index afe6e92163b8..dbf7ac3f1a32 100644 --- a/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java +++ b/hcatalog/hcatalog-pig-adapter/src/main/java/org/apache/hive/hcatalog/pig/PigHCatUtil.java @@ -132,7 +132,7 @@ static public Pair getDBTableNames(String location) throws IOExc static public String getHCatServerUri(Job job) { - return job.getConfiguration().get(HiveConf.ConfVars.METASTOREURIS.varname); + return job.getConfiguration().get(HiveConf.ConfVars.METASTORE_URIS.varname); } static public String getHCatServerPrincipal(Job job) { @@ -153,7 +153,7 @@ private static IMetaStoreClient getHiveMetaClient(String serverUri, HiveConf hiveConf = new HiveConf(job.getConfiguration(), clazz); if (serverUri != null) { - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, serverUri.trim()); + hiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, serverUri.trim()); } if 
(serverKerberosPrincipal != null) { diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java index ae292eb78c16..d5e5fc311973 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java @@ -84,10 +84,10 @@ public void setUp() throws Exception { } HiveConf hiveConf = new HiveConf(this.getClass()); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, TEST_WAREHOUSE_DIR); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java index b96479b826a0..b16d5c183d50 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderComplexSchema.java @@ -116,8 +116,8 @@ public static void setUpBeforeClass() throws Exception { + File.separator + "mapred" + File.separator + "staging"); hiveConf.set("mapred.temp.dir", workDir + File.separator + "TestHCatLoaderComplexSchema" + File.separator + "mapred" + File.separator + "temp"); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java index beb4fe9f4b92..0e5691a66543 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java @@ -159,10 +159,10 @@ public void setup() throws Exception { } HiveConf hiveConf = new HiveConf(this.getClass()); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, TEST_WAREHOUSE_DIR); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, 
"org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java index a0c5ce93ff27..3a2b3c15b5fc 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatStorerMulti.java @@ -95,11 +95,11 @@ public void setUp() throws Exception { if (driver == null) { HiveConf hiveConf = new HiveConf(this.getClass()); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); - hiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, TEST_WAREHOUSE_DIR); + hiveConf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/hcatalog/pom.xml b/hcatalog/pom.xml index eaf1b82c813a..2328f80ec898 100644 --- a/hcatalog/pom.xml +++ b/hcatalog/pom.xml @@ -22,7 +22,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml org.apache.hive.hcatalog @@ -75,7 +75,7 @@ org.apache.hive hive-standalone-metastore-server - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT tests test diff --git a/hcatalog/server-extensions/pom.xml b/hcatalog/server-extensions/pom.xml index b86e9f0e04ba..d6d88fcead7e 100644 --- a/hcatalog/server-extensions/pom.xml +++ b/hcatalog/server-extensions/pom.xml @@ -22,7 +22,7 @@ org.apache.hive.hcatalog hive-hcatalog - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-hcatalog-server-extensions diff --git a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java index 782fffb516b9..c53e5afa094d 100644 --- a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java +++ b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/messaging/MessageFactory.java @@ -54,7 +54,7 @@ public abstract class MessageFactory { + HCAT_MESSAGE_FORMAT, DEFAULT_MESSAGE_FACTORY_IMPL); - protected static final String HCAT_SERVER_URL = hiveConf.get(HiveConf.ConfVars.METASTOREURIS.name(), ""); + protected static final String HCAT_SERVER_URL = hiveConf.get(HiveConf.ConfVars.METASTORE_URIS.name(), ""); protected static final String HCAT_SERVICE_PRINCIPAL = hiveConf.get(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.name(), ""); /** diff --git a/hcatalog/webhcat/java-client/pom.xml b/hcatalog/webhcat/java-client/pom.xml index 48488d7e361e..a6bf4132d8b9 100644 --- a/hcatalog/webhcat/java-client/pom.xml +++ b/hcatalog/webhcat/java-client/pom.xml @@ -22,7 +22,7 @@ org.apache.hive.hcatalog hive-hcatalog - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../../pom.xml hive-webhcat-java-client diff --git a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java 
b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java index 0420c506136d..07c9ca57a2f7 100644 --- a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java +++ b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java @@ -54,9 +54,7 @@ import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe; -import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.mapred.TextInputFormat; -import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hive.hcatalog.DerbyPolicy; import org.apache.hive.hcatalog.api.repl.Command; import org.apache.hive.hcatalog.api.repl.ReplicationTask; @@ -109,9 +107,9 @@ public static void tearDown() throws Exception { public static void startMetaStoreServer() throws Exception { hcatConf = new HiveConf(TestHCatClient.class); - String metastoreUri = System.getProperty("test."+HiveConf.ConfVars.METASTOREURIS.varname); + String metastoreUri = System.getProperty("test."+HiveConf.ConfVars.METASTORE_URIS.varname); if (metastoreUri != null) { - hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, metastoreUri); + hcatConf.setVar(HiveConf.ConfVars.METASTORE_URIS, metastoreUri); useExternalMS = true; return; } @@ -130,17 +128,17 @@ public static void startMetaStoreServer() throws Exception { System.setSecurityManager(new NoExitSecurityManager()); Policy.setPolicy(new DerbyPolicy()); - hcatConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + hcatConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + msPort); - hcatConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + hcatConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); - hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hcatConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hcatConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, " "); } public static HiveConf getConf(){ @@ -819,7 +817,7 @@ private void startReplicationTargetMetaStoreIfRequired() throws Exception { .replace("metastore", "target_metastore")); replicationTargetHCatPort = MetaStoreTestUtils.startMetaStoreWithRetry(conf); replicationTargetHCatConf = new HiveConf(hcatConf); - replicationTargetHCatConf.setVar(HiveConf.ConfVars.METASTOREURIS, + replicationTargetHCatConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + replicationTargetHCatPort); isReplicationTargetHCatRunning = true; } diff --git a/hcatalog/webhcat/svr/pom.xml b/hcatalog/webhcat/svr/pom.xml index 4d3df293372a..e69ea170c431 100644 --- a/hcatalog/webhcat/svr/pom.xml +++ b/hcatalog/webhcat/svr/pom.xml @@ -22,7 +22,7 @@ org.apache.hive.hcatalog hive-hcatalog - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../../pom.xml hive-webhcat @@ -104,6 +104,16 @@ org.apache.zookeeper zookeeper + + + ch.qos.logback + logback-classic + + + ch.qos.logback + logback-core + + 
com.fasterxml.jackson.core diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java index b94c7d715530..d13adf97cb90 100644 --- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java +++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/AppConfig.java @@ -252,7 +252,7 @@ private void init() { private void handleHiveProperties() { HiveConf hiveConf = new HiveConf();//load hive-site.xml from classpath List interestingPropNames = Arrays.asList( - HiveConf.ConfVars.METASTOREURIS.varname, + HiveConf.ConfVars.METASTORE_URIS.varname, HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname, HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI.varname, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE.varname, diff --git a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Main.java b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Main.java index 66fa5eb4ae8a..04d99254e4ed 100644 --- a/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Main.java +++ b/hcatalog/webhcat/svr/src/main/java/org/apache/hive/hcatalog/templeton/Main.java @@ -286,7 +286,7 @@ private Connector createChannelConnector(Server server) { if (conf.getBoolean(AppConfig.USE_SSL, false)) { LOG.info("Using SSL for templeton."); - SslContextFactory sslContextFactory = new SslContextFactory(); + SslContextFactory sslContextFactory = new SslContextFactory.Server(); sslContextFactory.setKeyStorePath(conf.get(AppConfig.KEY_STORE_PATH, DEFAULT_KEY_STORE_PATH)); sslContextFactory.setKeyStorePassword(conf.get(AppConfig.KEY_STORE_PASSWORD, DEFAULT_KEY_STORE_PASSWORD)); Set excludedSSLProtocols = Sets.newHashSet(Splitter.on(",").trimResults().omitEmptyStrings() diff --git a/hplsql/pom.xml b/hplsql/pom.xml index d6793e0ab1ac..c080090b6495 100644 --- a/hplsql/pom.xml +++ b/hplsql/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-hplsql diff --git a/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4 b/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4 index 8bec8cd78c38..14cf034ad017 100644 --- a/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4 +++ b/hplsql/src/main/antlr4/org/apache/hive/hplsql/Hplsql.g4 @@ -1049,8 +1049,8 @@ expr : | expr_case | expr_cursor_attribute | expr_agg_window_func - | expr_spec_func | expr_func + | expr_spec_func | expr_atom ; @@ -1324,6 +1324,7 @@ non_reserved_words : // Tokens that are not reserved words | T_CURRENT | T_CURRENT_DATE | T_CURRENT_SCHEMA + | T_CURRENT_TIME_MILLIS | T_CURRENT_TIMESTAMP | T_CURRENT_USER | T_CURSOR @@ -1770,6 +1771,7 @@ T_MERGE : M E R G E ; T_MESSAGE_TEXT : M E S S A G E '_' T E X T ; T_MICROSECOND : M I C R O S E C O N D ; T_MICROSECONDS : M I C R O S E C O N D S; +T_MILLIS : M I L L I S ; T_MIN : M I N ; T_MULTISET : M U L T I S E T ; T_NCHAR : N C H A R ; @@ -1877,6 +1879,7 @@ T_TEMPORARY : T E M P O R A R Y ; T_TERMINATED : T E R M I N A T E D ; T_TEXTIMAGE_ON : T E X T I M A G E '_' O N ; T_THEN : T H E N ; +T_TIME : T I M E ; T_TIMESTAMP : T I M E S T A M P ; T_TINYINT : T I N Y I N T ; T_TITLE : T I T L E ; @@ -1913,6 +1916,7 @@ T_YES : Y E S ; T_ACTIVITY_COUNT : A C T I V I T Y '_' C O U N T ; T_CUME_DIST : C U M E '_' D I S T ; T_CURRENT_DATE : C U R R E N T '_' D A T E ; +T_CURRENT_TIME_MILLIS : C U R R E N T '_' T I M E '_' M I L L I S ; T_CURRENT_TIMESTAMP : C U R R E N T '_' T I M E S T A M 
P ; T_CURRENT_USER : C U R R E N T '_' U S E R ; T_DENSE_RANK : D E N S E '_' R A N K ; diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java b/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java index 8d5cab92e136..e423452e3da5 100644 --- a/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java +++ b/hplsql/src/main/java/org/apache/hive/hplsql/Exec.java @@ -1801,7 +1801,7 @@ public Integer visitExpr_func(HplsqlParser.Expr_funcContext ctx) { private int functionCall(ParserRuleContext ctx, HplsqlParser.IdentContext ident, HplsqlParser.Expr_func_paramsContext params) { String name = ident.getText(); - if (exec.buildSql) { + if (exec.buildSql && !builtinFunctions.exists(name)) { exec.execSql(name, params); } else { name = name.toUpperCase(); diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java b/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java index eac3a6d0d2de..78a25a0f2668 100644 --- a/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java +++ b/hplsql/src/main/java/org/apache/hive/hplsql/Stmt.java @@ -957,7 +957,11 @@ public Integer forCursor(HplsqlParser.For_cursor_stmtContext ctx) { int cols = query.columnCount(); Row row = new Row(); for (int i = 0; i < cols; i++) { - row.addColumnDefinition(query.metadata().columnName(i), query.metadata().columnTypeName(i)); + String columnName = query.metadata().columnName(i); + if (columnName.contains(".")) { + columnName = columnName.substring(columnName.lastIndexOf('.') + 1); + } + row.addColumnDefinition(columnName, query.metadata().columnTypeName(i)); } Var var = new Var(cursor, row); exec.addVariable(var); diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/Var.java b/hplsql/src/main/java/org/apache/hive/hplsql/Var.java index 198a4d0b09a3..1c7bb41824cd 100644 --- a/hplsql/src/main/java/org/apache/hive/hplsql/Var.java +++ b/hplsql/src/main/java/org/apache/hive/hplsql/Var.java @@ -601,7 +601,7 @@ else if (type == Type.STRING) { return (String)value; } else if (type == Type.DATE) { - return ((Date)value).toString(); + return String.format("DATE '%s'", value); } else if (type == Type.TIMESTAMP) { int len = 19; @@ -612,7 +612,7 @@ else if (type == Type.TIMESTAMP) { if (t.length() > len) { t = t.substring(0, len); } - return t; + return String.format("TIMESTAMP '%s'", t); } return value.toString(); } diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/functions/BuiltinFunctions.java b/hplsql/src/main/java/org/apache/hive/hplsql/functions/BuiltinFunctions.java index e5335854d054..100fa19c5349 100644 --- a/hplsql/src/main/java/org/apache/hive/hplsql/functions/BuiltinFunctions.java +++ b/hplsql/src/main/java/org/apache/hive/hplsql/functions/BuiltinFunctions.java @@ -114,7 +114,10 @@ public void specExec(HplsqlParser.Expr_spec_funcContext ctx) { execMinPartDate(ctx); } else if (ctx.T_PART_LOC() != null) { execPartLoc(ctx); - } else { + } else if (exec.buildSql){ + exec.stackPush(Exec.getFormattedText(ctx)); + } + else { evalNull(); } } @@ -132,7 +135,7 @@ public void specExecSql(HplsqlParser.Expr_spec_funcContext ctx) { func.run(ctx); } else { - exec.stackPush(Exec.getFormattedText(ctx)); + specExec(ctx); } } diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionDatetime.java b/hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionDatetime.java index 4870c14745a5..e7e562aca67f 100644 --- a/hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionDatetime.java +++ b/hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionDatetime.java @@ -21,7 +21,6 @@ import 
java.sql.Timestamp; import java.text.SimpleDateFormat; import java.util.Calendar; -import java.util.Date; import java.util.TimeZone; import org.apache.commons.lang3.StringUtils; @@ -39,15 +38,11 @@ public FunctionDatetime(Exec e, QueryExecutor queryExecutor) { @Override public void register(BuiltinFunctions f) { f.map.put("DATE", this::date); - f.map.put("FROM_UNIXTIME", this::fromUnixtime); f.map.put("NOW", ctx -> now(ctx)); f.map.put("TIMESTAMP_ISO", this::timestampIso); f.map.put("TO_TIMESTAMP", this::toTimestamp); - f.map.put("UNIX_TIMESTAMP", this::unixTimestamp); f.map.put("CURRENT_TIME_MILLIS", this::currentTimeMillis); - f.specMap.put("CURRENT_DATE", this::currentDate); - f.specMap.put("CURRENT_TIMESTAMP", this::currentTimestamp); f.specMap.put("SYSDATE", this::currentTimestamp); f.specSqlMap.put("CURRENT_DATE", (FuncSpecCommand) this::currentDateSql); @@ -57,10 +52,6 @@ public void register(BuiltinFunctions f) { /** * CURRENT_DATE */ - public void currentDate(HplsqlParser.Expr_spec_funcContext ctx) { - evalVar(currentDate()); - } - public static Var currentDate() { SimpleDateFormat f = new SimpleDateFormat("yyyy-MM-dd"); String s = f.format(Calendar.getInstance().getTime()); @@ -86,7 +77,7 @@ public void currentTimestamp(HplsqlParser.Expr_spec_funcContext ctx) { int precision = evalPop(ctx.expr(0), 3).intValue(); evalVar(currentTimestamp(precision)); } - + public static Var currentTimestamp(int precision) { String format = "yyyy-MM-dd HH:mm:ss"; if (precision > 0 && precision <= 3) { @@ -118,7 +109,9 @@ void date(HplsqlParser.Expr_func_paramsContext ctx) { return; } Var var = new Var(Var.Type.DATE); - var.cast(evalPop(ctx.func_param(0).expr())); + Var date = evalPop(ctx.func_param(0).expr()); + date.setValue(Utils.unquoteString(date.toString())); + var.cast(date); evalVar(var); } @@ -142,7 +135,9 @@ void timestampIso(HplsqlParser.Expr_func_paramsContext ctx) { return; } Var var = new Var(Var.Type.TIMESTAMP); - var.cast(evalPop(ctx.func_param(0).expr())); + Var val = evalPop(ctx.func_param(0).expr()); + val.setValue(Utils.unquoteString(val.toString())); + var.cast(val); evalVar(var); } @@ -154,8 +149,8 @@ void toTimestamp(HplsqlParser.Expr_func_paramsContext ctx) { evalNull(); return; } - String value = evalPop(ctx.func_param(0).expr()).toString(); - String sqlFormat = evalPop(ctx.func_param(1).expr()).toString(); + String value = Utils.unquoteString(evalPop(ctx.func_param(0).expr()).toString()); + String sqlFormat = Utils.unquoteString(evalPop(ctx.func_param(1).expr()).toString()); String format = Utils.convertSqlDatetimeFormat(sqlFormat); try { long timeInMs = new SimpleDateFormat(format).parse(value).getTime(); @@ -166,30 +161,6 @@ void toTimestamp(HplsqlParser.Expr_func_paramsContext ctx) { evalNull(); } } - - /** - * FROM_UNIXTIME() function (convert seconds since 1970-01-01 00:00:00 to timestamp) - */ - void fromUnixtime(HplsqlParser.Expr_func_paramsContext ctx) { - int cnt = BuiltinFunctions.getParamCount(ctx); - if (cnt == 0) { - evalNull(); - return; - } - long epoch = evalPop(ctx.func_param(0).expr()).longValue(); - String format = "yyyy-MM-dd HH:mm:ss"; - if (cnt > 1) { - format = evalPop(ctx.func_param(1).expr()).toString(); - } - evalString(new SimpleDateFormat(format).format(new Date(epoch * 1000))); - } - - /** - * UNIX_TIMESTAMP() function (current date and time in seconds since 1970-01-01 00:00:00) - */ - void unixTimestamp(HplsqlParser.Expr_func_paramsContext ctx) { - evalVar(new Var(System.currentTimeMillis()/1000)); - } public void 
currentTimeMillis(HplsqlParser.Expr_func_paramsContext ctx) { evalVar(new Var(System.currentTimeMillis())); diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionMisc.java b/hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionMisc.java index dba5594d933e..18c2ded2e036 100644 --- a/hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionMisc.java +++ b/hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionMisc.java @@ -40,15 +40,11 @@ public FunctionMisc(Exec e, QueryExecutor queryExecutor) { */ @Override public void register(BuiltinFunctions f) { - f.map.put("COALESCE", this::nvl); f.map.put("DECODE", this::decode); - f.map.put("NVL", this::nvl); f.map.put("NVL2", this::nvl2); f.map.put("PART_COUNT_BY", this::partCountBy); - f.map.put("MOD", this::modulo); f.specMap.put("ACTIVITY_COUNT", this::activityCount); - f.specMap.put("CAST", this::cast); f.specMap.put("CURRENT", this::current); f.specMap.put("CURRENT_USER", this::currentUser); f.specMap.put("PART_COUNT", this::partCount); @@ -64,28 +60,6 @@ void activityCount(HplsqlParser.Expr_spec_funcContext ctx) { evalInt(Long.valueOf(exec.getRowCount())); } - /** - * CAST function - */ - void cast(HplsqlParser.Expr_spec_funcContext ctx) { - if (ctx.expr().size() != 1) { - evalNull(); - return; - } - String type = ctx.dtype().getText(); - String len = null; - String scale = null; - if (ctx.dtype_len() != null) { - len = ctx.dtype_len().L_INT(0).getText(); - if (ctx.dtype_len().L_INT(1) != null) { - scale = ctx.dtype_len().L_INT(1).getText(); - } - } - Var var = new Var(null, type, len, scale, null); - var.cast(evalPop(ctx.expr(0))); - evalVar(var); - } - /** * CURRENT function */ @@ -124,8 +98,9 @@ else if (ctx.T_TIMESTAMP() != null) { else { evalString("CURRENT_TIMESTAMP"); } - } - else { + } else if (ctx.T_USER() != null) { + evalString("CURRENT_USER()"); + } else { evalString(exec.getFormattedText(ctx)); } } @@ -138,7 +113,7 @@ void currentUser(HplsqlParser.Expr_spec_funcContext ctx) { } public static Var currentUser() { - return new Var(System.getProperty("user.name")); + return new Var("CURRENT_USER()"); } /** @@ -168,26 +143,13 @@ void decode(HplsqlParser.Expr_func_paramsContext ctx) { } } - /** - * NVL function - Return first non-NULL expression - */ - void nvl(HplsqlParser.Expr_func_paramsContext ctx) { - for (int i=0; i < ctx.func_param().size(); i++) { - Var v = evalPop(ctx.func_param(i).expr()); - if (v.type != Var.Type.NULL) { - exec.stackPush(v); - return; - } - } - evalNull(); - } - /** * NVL2 function - If expr1 is not NULL return expr2, otherwise expr3 */ void nvl2(HplsqlParser.Expr_func_paramsContext ctx) { if (ctx.func_param().size() == 3) { - if (!evalPop(ctx.func_param(0).expr()).isNull()) { + Var firstParam = evalPop(ctx.func_param(0).expr()); + if (!(firstParam.isNull() || "null".equalsIgnoreCase((String)firstParam.value))) { eval(ctx.func_param(1).expr()); } else { @@ -249,16 +211,6 @@ public void partCount(HplsqlParser.Expr_spec_funcContext ctx) { query.close(); } - public void modulo(HplsqlParser.Expr_func_paramsContext ctx) { - if (ctx.func_param().size() == 2) { - int a = evalPop(ctx.func_param(0).expr()).intValue(); - int b = evalPop(ctx.func_param(1).expr()).intValue(); - evalInt(a % b); - } else { - evalNull(); - } - } - /** * PART_COUNT_BY function */ diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionString.java b/hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionString.java index 702da33908f9..0020189a7978 100644 --- 
a/hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionString.java +++ b/hplsql/src/main/java/org/apache/hive/hplsql/functions/FunctionString.java @@ -33,18 +33,12 @@ public FunctionString(Exec e, QueryExecutor queryExecutor) { public void register(BuiltinFunctions f) { f.map.put("CONCAT", this::concat); f.map.put("CHAR", this::char_); - f.map.put("INSTR", this::instr); f.map.put("LEN", this::len); - f.map.put("LENGTH", this::length); - f.map.put("LOWER", this::lower); - f.map.put("REPLACE", this::replace); f.map.put("SUBSTR", this::substr); f.map.put("SUBSTRING", this::substr); f.map.put("TO_CHAR", this::toChar); f.map.put("UPPER", this::upper); - f.specMap.put("SUBSTRING", this::substring); - f.specMap.put("TRIM", this::trim); } /** @@ -52,12 +46,13 @@ public void register(BuiltinFunctions f) { */ void concat(HplsqlParser.Expr_func_paramsContext ctx) { StringBuilder val = new StringBuilder(); + appendSingleQuote(val); int cnt = getParamCount(ctx); boolean nulls = true; for (int i = 0; i < cnt; i++) { Var c = evalPop(ctx.func_param(i).expr()); - if (!c.isNull()) { - val.append(c.toString()); + if (!c.isNull() && !"null".equalsIgnoreCase((String)c.value)) { + val.append(Utils.unquoteString(c.toString())); nulls = false; } } @@ -65,6 +60,7 @@ void concat(HplsqlParser.Expr_func_paramsContext ctx) { evalNull(); } else { + appendSingleQuote(val); evalString(val); } } @@ -82,67 +78,6 @@ void char_(HplsqlParser.Expr_func_paramsContext ctx) { evalString(str); } - /** - * INSTR function - */ - void instr(HplsqlParser.Expr_func_paramsContext ctx) { - int cnt = getParamCount(ctx); - if (cnt < 2) { - evalNull(); - return; - } - String str = evalPop(ctx.func_param(0).expr()).toString(); - if (str == null) { - evalNull(); - return; - } - else if(str.isEmpty()) { - evalInt(0); - return; - } - String substr = evalPop(ctx.func_param(1).expr()).toString(); - int pos = 1; - int occur = 1; - int idx = 0; - if (cnt >= 3) { - pos = evalPop(ctx.func_param(2).expr()).intValue(); - if (pos == 0) { - pos = 1; - } - } - if (cnt >= 4) { - occur = evalPop(ctx.func_param(3).expr()).intValue(); - if (occur < 0) { - occur = 1; - } - } - for (int i = occur; i > 0; i--) { - if (pos > 0) { - idx = str.indexOf(substr, pos - 1); - } - else { - str = str.substring(0, str.length() - pos*(-1)); - idx = str.lastIndexOf(substr); - } - if (idx == -1) { - idx = 0; - break; - } - else { - idx++; - } - if (i > 1) { - if (pos > 0) { - pos = idx + 1; - } - else { - pos = (str.length() - idx + 1) * (-1); - } - } - } - evalInt(idx); - } - /** * LEN function (excluding trailing spaces) */ @@ -151,49 +86,10 @@ void len(HplsqlParser.Expr_func_paramsContext ctx) { evalNull(); return; } - int len = evalPop(ctx.func_param(0).expr()).toString().trim().length(); + int len = Utils.unquoteString(evalPop(ctx.func_param(0).expr()).toString()).trim().length(); evalInt(len); } - - /** - * LENGTH function - */ - void length(HplsqlParser.Expr_func_paramsContext ctx) { - if (ctx.func_param().size() != 1) { - evalNull(); - return; - } - int len = evalPop(ctx.func_param(0).expr()).toString().length(); - evalInt(len); - } - - /** - * LOWER function - */ - void lower(HplsqlParser.Expr_func_paramsContext ctx) { - if (ctx.func_param().size() != 1) { - evalNull(); - return; - } - String str = evalPop(ctx.func_param(0).expr()).toString().toLowerCase(); - evalString(str); - } - - /** - * REPLACE function - */ - void replace(HplsqlParser.Expr_func_paramsContext ctx) { - int cnt = getParamCount(ctx); - if (cnt < 3) { - evalNull(); - return; - } - String 
str = evalPop(ctx.func_param(0).expr()).toString(); - String what = evalPop(ctx.func_param(1).expr()).toString(); - String with = evalPop(ctx.func_param(2).expr()).toString(); - evalString(str.replaceAll(what, with)); - } - + /** * SUBSTR and SUBSTRING function */ @@ -203,18 +99,18 @@ void substr(HplsqlParser.Expr_func_paramsContext ctx) { evalNull(); return; } - String str = evalPop(ctx.func_param(0).expr()).toString(); + String str = Utils.unquoteString(evalPop(ctx.func_param(0).expr()).toString()); int start = evalPop(ctx.func_param(1).expr()).intValue(); int len = -1; if (start == 0) { - start = 1; + start = 1; } if (cnt > 2) { len = evalPop(ctx.func_param(2).expr()).intValue(); } substr(str, start, len); } - + void substr(String str, int start, int len) { if (str == null) { evalNull(); @@ -225,27 +121,42 @@ else if (str.isEmpty()) { return; } if (start == 0) { - start = 1; + start = 1; } + StringBuilder resultStr = new StringBuilder(); if (len == -1) { if (start > 0) { - evalString(str.substring(start - 1)); + String substring = str.substring(start - 1); + appendSingleQuote(resultStr); + resultStr.append(substring); + appendSingleQuote(resultStr); + evalString(resultStr); } } else { - evalString(str.substring(start - 1, start - 1 + len)); + String substring = str.substring(start - 1, start - 1 + len); + appendSingleQuote(resultStr); + resultStr.append(substring); + appendSingleQuote(resultStr); + evalString(resultStr); } } - + + private void appendSingleQuote(StringBuilder resultStr) { + if (exec.buildSql) { + resultStr.append("'"); + } + } + /** * SUBSTRING FROM FOR function */ void substring(HplsqlParser.Expr_spec_funcContext ctx) { - String str = evalPop(ctx.expr(0)).toString(); + String str = evalPop(ctx.expr(0)).toString(); int start = evalPop(ctx.expr(1)).intValue(); int len = -1; if (start == 0) { - start = 1; + start = 1; } if (ctx.T_FOR() != null) { len = evalPop(ctx.expr(2)).intValue(); @@ -253,19 +164,6 @@ void substring(HplsqlParser.Expr_spec_funcContext ctx) { substr(str, start, len); } - /** - * TRIM function - */ - void trim(HplsqlParser.Expr_spec_funcContext ctx) { - int cnt = ctx.expr().size(); - if (cnt != 1) { - evalNull(); - return; - } - String str = evalPop(ctx.expr(0)).toString(); - evalString(str.trim()); - } - /** * TO_CHAR function */ diff --git a/hplsql/src/main/java/org/apache/hive/hplsql/functions/InMemoryFunctionRegistry.java b/hplsql/src/main/java/org/apache/hive/hplsql/functions/InMemoryFunctionRegistry.java index 262e8c74bc93..767cb50e221a 100644 --- a/hplsql/src/main/java/org/apache/hive/hplsql/functions/InMemoryFunctionRegistry.java +++ b/hplsql/src/main/java/org/apache/hive/hplsql/functions/InMemoryFunctionRegistry.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import java.util.HashMap; +import java.util.List; import java.util.Map; import org.antlr.v4.runtime.ParserRuleContext; @@ -132,39 +133,77 @@ private boolean execProc(String name, HplsqlParser.Expr_func_paramsContext ctx) /** * Set parameters for user-defined function call */ - public static void setCallParameters(String procName, HplsqlParser.Expr_func_paramsContext actual, ArrayList actualValues, - HplsqlParser.Create_routine_paramsContext formal, - HashMap out, - Exec exec) { - if (actual == null || actual.func_param() == null || actualValues == null) { + public static void setCallParameters(String procName, HplsqlParser.Expr_func_paramsContext actual, + ArrayList actualValues, HplsqlParser.Create_routine_paramsContext formal, HashMap out, + Exec exec) { + // if it is a non-parameter 
function then just return. + if (actual == null && formal == null) { return; } - int actualCnt = actualValues.size(); - int formalCnt = formal.create_routine_param_item().size(); - if (formalCnt != actualCnt) { - throw new ArityException(actual.getParent(), procName, formalCnt, actualCnt); + int actualCnt = (actualValues == null) ? 0 : actualValues.size(); + List routineParamItem = formal.create_routine_param_item(); + int formalCnt = routineParamItem.size(); + ParserRuleContext ruleContext = (actual == null) ? null : actual.getParent(); + if (actualCnt > formalCnt) { + throw new ArityException(ruleContext, procName, formalCnt, actualCnt); } + Map defaultParamNamesVsIndexes = new HashMap<>(); + if (actualCnt != formalCnt) { + populateDefaultParamDetails(routineParamItem, defaultParamNamesVsIndexes); + } + + // set the passed params for (int i = 0; i < actualCnt; i++) { - HplsqlParser.ExprContext a = actual.func_param(i).expr(); - HplsqlParser.Create_routine_param_itemContext p = getCallParameter(actual, formal, i); - String name = p.ident().getText(); - String type = p.dtype().getText(); - String len = null; - String scale = null; - if (p.dtype_len() != null) { - len = p.dtype_len().L_INT(0).getText(); - if (p.dtype_len().L_INT(1) != null) { - scale = p.dtype_len().L_INT(1).getText(); - } + HplsqlParser.ExprContext exprContext = actual.func_param(i).expr(); + HplsqlParser.Create_routine_param_itemContext paramItemContext = getCallParameter(actual, formal, i); + Var value = actualValues.get(i); + // for any default param value is passed then remove it from default param list + defaultParamNamesVsIndexes.remove(paramItemContext.ident().getText()); + setCallParameter(actual, out, exec, exprContext, paramItemContext, value); + } + // set the remaining default params + for (int index : defaultParamNamesVsIndexes.values()) { + HplsqlParser.Create_routine_param_itemContext paramItemContext = formal.create_routine_param_item().get(index); + HplsqlParser.ExprContext exprContext = paramItemContext.dtype_default().expr(); + Var value = exec.evalPop(paramItemContext.dtype_default().expr()); + setCallParameter(actual, out, exec, exprContext, paramItemContext, value); + } + // if actual param count + remaining default param count is lesser than formal param count then throw exception as some params are missing + if ((actualCnt + defaultParamNamesVsIndexes.size()) != formalCnt) { + throw new ArityException(ruleContext, procName, formalCnt, actualCnt); + } + } + + private static void populateDefaultParamDetails(List routineParamItem, + Map defaultParamNamesVsIndexes) { + int formalCnt = routineParamItem.size(); + for (int i = 0; i < formalCnt; i++) { + HplsqlParser.Create_routine_param_itemContext routineParamItemContext = routineParamItem.get(i); + if (routineParamItemContext.dtype_default() != null) { + defaultParamNamesVsIndexes.put(routineParamItemContext.ident().getText(), i); + } + } + } + + private static void setCallParameter(HplsqlParser.Expr_func_paramsContext actual, HashMap out, Exec exec, + HplsqlParser.ExprContext a, HplsqlParser.Create_routine_param_itemContext p, Var value) { + String name = p.ident().getText(); + String type = p.dtype().getText(); + String len = null; + String scale = null; + if (p.dtype_len() != null) { + len = p.dtype_len().L_INT(0).getText(); + if (p.dtype_len().L_INT(1) != null) { + scale = p.dtype_len().L_INT(1).getText(); } - Var var = setCallParameter(name, type, len, scale, actualValues.get(i), exec); - exec.trace(actual, "SET PARAM " + name + " = " + 
var.toString()); - if (out != null && a.expr_atom() != null && a.expr_atom().qident() != null && - (p.T_OUT() != null || p.T_INOUT() != null)) { - String actualName = a.expr_atom().qident().getText(); - if (actualName != null) { - out.put(actualName, var); - } + } + Var variable = setCallParameter(name, type, len, scale, value, exec); + exec.trace(actual, "SET PARAM " + name + " = " + variable.toString()); + if (out != null && a.expr_atom() != null && a.expr_atom() + .qident() != null && (p.T_OUT() != null || p.T_INOUT() != null)) { + String actualName = a.expr_atom().qident().getText(); + if (actualName != null) { + out.put(actualName, variable); } } } diff --git a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java index 064ea9116e23..0af1f83be423 100644 --- a/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java +++ b/hplsql/src/test/java/org/apache/hive/hplsql/TestHplsqlLocal.java @@ -64,26 +64,11 @@ public void testCase() throws Exception { run("case"); } - @Test - public void testCast() throws Exception { - run("cast"); - } - - @Test - public void testCast2() throws Exception { - run("cast2"); - } - @Test public void testChar() throws Exception { run("char"); } - @Test - public void testCoalesce() throws Exception { - run("coalesce"); - } - @Test public void testConcat() throws Exception { run("concat"); @@ -273,10 +258,6 @@ public void testIf3Bteq() throws Exception { public void testInclude() throws Exception { run("include"); } - @Test - public void testInstr() throws Exception { - run("instr"); - } @Test public void testInterval() throws Exception { @@ -293,31 +274,16 @@ public void testLeave() throws Exception { run("leave"); } - @Test - public void testLength() throws Exception { - run("length"); - } - @Test public void testLen() throws Exception { run("len"); } - @Test - public void testLower() throws Exception { - run("lower"); - } - @Test public void testMultDiv() throws Exception { run("mult_div"); } - @Test - public void testNvl() throws Exception { - run("nvl"); - } - @Test public void testNvl2() throws Exception { run("nvl2"); @@ -327,11 +293,6 @@ public void testNvl2() throws Exception { public void testPrint() throws Exception { run("print"); } - - @Test - public void testReplace() throws Exception { - run("replace"); - } @Test public void testReturn() throws Exception { @@ -363,11 +324,6 @@ public void testTimestampIso() throws Exception { run("timestamp_iso"); } - @Test - public void testTimestamp() throws Exception { - run("timestamp"); - } - @Test public void testToChar() throws Exception { run("to_char"); @@ -378,11 +334,6 @@ public void testToTimestamp() throws Exception { run("to_timestamp"); } - @Test - public void testTrim() throws Exception { - run("trim"); - } - @Test public void testTwoPipes() throws Exception { run("twopipes"); diff --git a/hplsql/src/test/queries/local/cast.sql b/hplsql/src/test/queries/local/cast.sql deleted file mode 100644 index 3adab22823dd..000000000000 --- a/hplsql/src/test/queries/local/cast.sql +++ /dev/null @@ -1,4 +0,0 @@ -CAST('Abc' AS CHAR(1)); -CAST('Abc' AS VARCHAR(2)); -CAST('Abc' AS CHAR); -CAST(TIMESTAMP '2015-03-12 10:58:34.111' AS CHAR(10)) diff --git a/hplsql/src/test/queries/local/cast2.sql b/hplsql/src/test/queries/local/cast2.sql deleted file mode 100644 index d68db6d1b756..000000000000 --- a/hplsql/src/test/queries/local/cast2.sql +++ /dev/null @@ -1,10 +0,0 @@ -temp_int = CAST('1' AS int); -print temp_int -temp_float = 
CAST('1.2' AS float); -print temp_float -temp_double = CAST('1.2' AS double); -print temp_double -temp_decimal = CAST('1.2' AS decimal(10, 4)); -print temp_decimal -temp_string = CAST('1.2' AS string); -print temp_string \ No newline at end of file diff --git a/hplsql/src/test/queries/local/coalesce.sql b/hplsql/src/test/queries/local/coalesce.sql deleted file mode 100644 index 4b65d58b56fb..000000000000 --- a/hplsql/src/test/queries/local/coalesce.sql +++ /dev/null @@ -1,4 +0,0 @@ -COALESCE('First non-null', 1); -COALESCE(NULL, 'First non-null'); -COALESCE(NULL, 'First non-null', 1); -COALESCE(NULL, NULL, 'First non-null', 1); \ No newline at end of file diff --git a/hplsql/src/test/queries/local/concat.sql b/hplsql/src/test/queries/local/concat.sql index b7769bb98247..37b7619ccb5d 100644 --- a/hplsql/src/test/queries/local/concat.sql +++ b/hplsql/src/test/queries/local/concat.sql @@ -1,2 +1,2 @@ CONCAT('a', 'b', NULL, 'c'); -NVL(CONCAT(NULL, NULL, NULL), 'NULL Value'); \ No newline at end of file +NVL2(CONCAT(NULL, NULL, NULL), NULL, 'NULL Value'); \ No newline at end of file diff --git a/hplsql/src/test/queries/local/create_function3.sql b/hplsql/src/test/queries/local/create_function3.sql index 840c09573566..ec1de72fceb0 100644 --- a/hplsql/src/test/queries/local/create_function3.sql +++ b/hplsql/src/test/queries/local/create_function3.sql @@ -6,47 +6,47 @@ FUNCTION gettype(tag1 varchar2, srcvalue varchar2) return varchar2 as return '@I'; end if; - if trim(tag1) = 'WHMM' then + if (tag1) = 'WHMM' then return '002'; end if; - if trim(tag1) = 'TCPJ' and srcvalue = '010105' then + if (tag1) = 'TCPJ' and srcvalue = '010105' then return '010105'; end if; - if trim(tag1) = 'TCPJ' and srcvalue != '010105' then + if (tag1) = 'TCPJ' and srcvalue != '010105' then return '003'; end if; - if trim(tag1) = 'TCPJ' and srcvalue != '010105' then + if (tag1) = 'TCPJ' and srcvalue != '010105' then return '003_ticket'; end if; - if trim(tag1) = 'TCJY' and srcvalue != '010105' then + if (tag1) = 'TCJY' and srcvalue != '010105' then return '003_ticket'; end if; - if trim(tag1) = 'TCJY' and srcvalue != '010105' then + if (tag1) = 'TCJY' and srcvalue != '010105' then return '003_ticket'; end if; - if trim(tag1) = 'YHHPD' then + if (tag1) = 'YHHPD' then return '002_foreign'; end if; - if trim(tag1) = 'WHWZ' then + if (tag1) = 'WHWZ' then return '002_foreign'; end if; - if trim(tag1) = 'WHLZ' then + if (tag1) = 'WHLZ' then return '002_foreign'; end if; - if trim(tag1) = 'DEWZ' then + if (tag1) = 'DEWZ' then return '024_out'; end if; - if trim(tag1) = 'DELZ' then + if (tag1) = 'DELZ' then return '024_out'; end if; diff --git a/hplsql/src/test/queries/local/create_function4.sql b/hplsql/src/test/queries/local/create_function4.sql index 21986e081719..bc3733436499 100644 --- a/hplsql/src/test/queries/local/create_function4.sql +++ b/hplsql/src/test/queries/local/create_function4.sql @@ -4,10 +4,10 @@ FUNCTION get(CODE VARCHAR2) RETURN VARCHAR2 AS TMPVAR := ''; - IF TRIM(TMPVAR) = '' THEN + IF (TMPVAR) = '' THEN RETURN '00080000'; ELSE - RETURN TRIM(TMPVAR); + RETURN (TMPVAR); END IF; EXCEPTION WHEN NO_DATA_FOUND THEN diff --git a/hplsql/src/test/queries/local/date.sql b/hplsql/src/test/queries/local/date.sql index 2ef474329afa..981075ce33d6 100644 --- a/hplsql/src/test/queries/local/date.sql +++ b/hplsql/src/test/queries/local/date.sql @@ -1,5 +1,4 @@ DATE '2014-12-20' DATE('2015-03-12'); -DATE('2015' || '-03-' || '12'); -DATE(TIMESTAMP '2015-03-12 10:58:34.111'); \ No newline at end of file +DATE('2015' || '-03-' 
|| '12'); \ No newline at end of file diff --git a/hplsql/src/test/queries/local/expr.sql b/hplsql/src/test/queries/local/expr.sql index 33388a25b121..e65dff98b546 100644 --- a/hplsql/src/test/queries/local/expr.sql +++ b/hplsql/src/test/queries/local/expr.sql @@ -15,7 +15,4 @@ PRINT c; PRINT 'Integer decrement'; c := 3; c := c - 1; -PRINT c; - -PRINT NVL(null - 3, 'Correct'); -PRINT NVL(null + 3, 'Correct'); \ No newline at end of file +PRINT c; \ No newline at end of file diff --git a/hplsql/src/test/queries/local/if2.sql b/hplsql/src/test/queries/local/if2.sql index b645b86e72f2..058847939715 100644 --- a/hplsql/src/test/queries/local/if2.sql +++ b/hplsql/src/test/queries/local/if2.sql @@ -1,4 +1,4 @@ -if not (coalesce(1,0) between 3 and 5) then +if not (nvl2(1,0) between 3 and 5) then print 'correct'; else print 'failed'; diff --git a/hplsql/src/test/queries/local/instr.sql b/hplsql/src/test/queries/local/instr.sql deleted file mode 100644 index 9cd8dcacc3cd..000000000000 --- a/hplsql/src/test/queries/local/instr.sql +++ /dev/null @@ -1,49 +0,0 @@ -IF INSTR('abc', 'b') = 2 THEN - PRINT 'Correct'; -ELSE - PRINT 'Failed'; -END IF; - -IF INSTR('abcabc', 'b', 3) = 5 THEN - PRINT 'Correct'; -ELSE - PRINT 'Failed'; -END IF; - -IF INSTR('abcabcabc', 'b', 3, 2) = 8 THEN - PRINT 'Correct'; -ELSE - PRINT 'Failed'; -END IF; - -IF INSTR('abcabcabc', 'b', -3) = 5 THEN - PRINT 'Correct'; -ELSE - PRINT 'Failed'; -END IF; - -IF INSTR('abcabcabc', 'b', -3, 2) = 2 THEN - PRINT 'Correct'; -ELSE - PRINT 'Failed'; -END IF; - -DECLARE c STRING; - -IF INSTR(c, 'b') IS NULL THEN - PRINT 'Correct'; -ELSE - PRINT 'Failed'; -END IF; - -IF INSTR(NULL, 'b') IS NULL THEN - PRINT 'Correct'; -ELSE - PRINT 'Failed'; -END IF; - -IF INSTR('', 'b') = 0 THEN - PRINT 'Correct'; -ELSE - PRINT 'Failed'; -END IF; \ No newline at end of file diff --git a/hplsql/src/test/queries/local/interval.sql b/hplsql/src/test/queries/local/interval.sql index e40fcbd229c7..98cf4927c7ee 100644 --- a/hplsql/src/test/queries/local/interval.sql +++ b/hplsql/src/test/queries/local/interval.sql @@ -2,15 +2,15 @@ DATE '2015-03-12' + 1 DAY; TIMESTAMP '2015-03-12' + 1 DAY; TIMESTAMP '2015-03-12 10:10:10.000' + 1 MICROSECOND; -DATE '2015-03-12' + NVL(NULL, 3) DAYS; -TIMESTAMP '2015-03-12' + NVL(NULL, 3) DAYS; +DATE '2015-03-12' + NVL2(NULL, NULL, 3) DAYS; +TIMESTAMP '2015-03-12' + NVL2(NULL, NULL, 3) DAYS; DATE '2015-03-12' - 1 DAY; TIMESTAMP '2015-03-12' - 1 DAY; TIMESTAMP '2015-03-12 10:10:10.000' - 1 MICROSECOND; -DATE '2015-03-12' - NVL(NULL, 3) DAYS; -TIMESTAMP '2015-03-12' - NVL(NULL, 3) DAYS; +DATE '2015-03-12' - NVL2(NULL, NULL, 3) DAYS; +TIMESTAMP '2015-03-12' - NVL2(NULL, NULL, 3) DAYS; TIMESTAMP '2015-03-12' - 1 DAY - 1 MICROSECOND; diff --git a/hplsql/src/test/queries/local/length.sql b/hplsql/src/test/queries/local/length.sql deleted file mode 100644 index 42cf3ccf0a84..000000000000 --- a/hplsql/src/test/queries/local/length.sql +++ /dev/null @@ -1 +0,0 @@ -LENGTH('Abc '); \ No newline at end of file diff --git a/hplsql/src/test/queries/local/lower.sql b/hplsql/src/test/queries/local/lower.sql deleted file mode 100644 index f29b0e970eae..000000000000 --- a/hplsql/src/test/queries/local/lower.sql +++ /dev/null @@ -1 +0,0 @@ -LOWER('ABC'); \ No newline at end of file diff --git a/hplsql/src/test/queries/local/mult_div.sql b/hplsql/src/test/queries/local/mult_div.sql index ebad8f46a15a..23fe88f91d2f 100644 --- a/hplsql/src/test/queries/local/mult_div.sql +++ b/hplsql/src/test/queries/local/mult_div.sql @@ -4,5 +4,5 @@ declare c int default 
2; print a/b/c; -set a = 4 * 2 / cast(4 as int) /2; -set b = 4 * 2 /cast(4 as int)/2; \ No newline at end of file +set a = 4 * 2 / 4 / 2; +set b = 4 * 2 / 4 / 2; \ No newline at end of file diff --git a/hplsql/src/test/queries/local/nvl.sql b/hplsql/src/test/queries/local/nvl.sql deleted file mode 100644 index 1a843bcb28a0..000000000000 --- a/hplsql/src/test/queries/local/nvl.sql +++ /dev/null @@ -1,4 +0,0 @@ -NVL('First non-null', 1); -NVL(NULL, 'First non-null'); -NVL(NULL, 'First non-null', 1); -NVL(NULL, NULL, 'First non-null', 1); \ No newline at end of file diff --git a/hplsql/src/test/queries/local/replace.sql b/hplsql/src/test/queries/local/replace.sql deleted file mode 100644 index 820aa067870f..000000000000 --- a/hplsql/src/test/queries/local/replace.sql +++ /dev/null @@ -1 +0,0 @@ -replace('2016-03-03', '-', ''); \ No newline at end of file diff --git a/hplsql/src/test/queries/local/timestamp.sql b/hplsql/src/test/queries/local/timestamp.sql deleted file mode 100644 index 2971ceac8883..000000000000 --- a/hplsql/src/test/queries/local/timestamp.sql +++ /dev/null @@ -1,4 +0,0 @@ -TIMESTAMP '2015-03-03 11:39:31.123456'; -TIMESTAMP '2015-03-03 11:39:31.123'; -TIMESTAMP '2015-03-03 11:39:31'; -TIMESTAMP '2015-03-03-11.39.31.123'; \ No newline at end of file diff --git a/hplsql/src/test/queries/local/timestamp_iso.sql b/hplsql/src/test/queries/local/timestamp_iso.sql index 9bcdfe089040..e6601801fe12 100644 --- a/hplsql/src/test/queries/local/timestamp_iso.sql +++ b/hplsql/src/test/queries/local/timestamp_iso.sql @@ -1,2 +1 @@ -TIMESTAMP_ISO('2015-03-12'); -TIMESTAMP_ISO(DATE '2015-03-12'); \ No newline at end of file +TIMESTAMP_ISO('2015-03-12'); \ No newline at end of file diff --git a/hplsql/src/test/queries/local/trim.sql b/hplsql/src/test/queries/local/trim.sql deleted file mode 100644 index f8a2978d6503..000000000000 --- a/hplsql/src/test/queries/local/trim.sql +++ /dev/null @@ -1 +0,0 @@ -'#' || TRIM(' Hello ') || '#'; \ No newline at end of file diff --git a/hplsql/src/test/results/local/add.out.txt b/hplsql/src/test/results/local/add.out.txt index 37a195bfe189..7cfd6d68a017 100644 --- a/hplsql/src/test/results/local/add.out.txt +++ b/hplsql/src/test/results/local/add.out.txt @@ -1,2 +1,2 @@ -2015-01-01 -2015-01-01 +DATE '2015-01-01' +DATE '2015-01-01' diff --git a/hplsql/src/test/results/local/arity.out.txt b/hplsql/src/test/results/local/arity.out.txt index ccf4e589e752..43542c011a27 100644 --- a/hplsql/src/test/results/local/arity.out.txt +++ b/hplsql/src/test/results/local/arity.out.txt @@ -7,4 +7,5 @@ a=1 Ln:4 PRINT b=2 Ln:8 EXEC PROCEDURE P +Ln:8 SET PARAM a = 1 Ln:8 wrong number of arguments in call to 'P'. Expected 2 got 1. 
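Note: the expected-output updates in the surrounding hplsql result files (add.out.txt, date.out.txt, declare.out.txt, interval.out.txt, and the ones that follow) all stem from the Var.toString() change earlier in this patch, which now renders DATE and TIMESTAMP values as SQL literals (DATE '...' / TIMESTAMP '...') when generating SQL text. The snippet below is only a minimal, self-contained sketch of that formatting rule, not the actual Var class; the class name SqlLiteralSketch and its helper methods are illustrative assumptions, and the truncation logic is simplified to the seconds-precision case rather than the full length-19-plus-scale handling in the patch.

import java.sql.Date;
import java.sql.Timestamp;

public class SqlLiteralSketch {

  // Render a DATE value as a SQL literal, the way the patched toString() does.
  static String formatDate(Date value) {
    return String.format("DATE '%s'", value);
  }

  // Render a TIMESTAMP value as a SQL literal, dropping a trailing ".0"
  // so whole-second values print with seconds precision only
  // (a simplified take on the truncation in the patch).
  static String formatTimestamp(Timestamp value) {
    String t = value.toString();
    if (t.endsWith(".0")) {
      t = t.substring(0, 19);
    }
    return String.format("TIMESTAMP '%s'", t);
  }

  public static void main(String[] args) {
    // Matches the updated expectations, e.g. add.out.txt and declare.out.txt.
    System.out.println(formatDate(Date.valueOf("2015-01-01")));                     // DATE '2015-01-01'
    System.out.println(formatTimestamp(Timestamp.valueOf("2015-05-13 11:10:01"))); // TIMESTAMP '2015-05-13 11:10:01'
  }
}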
diff --git a/hplsql/src/test/results/local/cast.out.txt b/hplsql/src/test/results/local/cast.out.txt deleted file mode 100644 index f3de493e67c1..000000000000 --- a/hplsql/src/test/results/local/cast.out.txt +++ /dev/null @@ -1,8 +0,0 @@ -Ln:1 FUNC CAST -A -Ln:2 FUNC CAST -Ab -Ln:3 FUNC CAST -Abc -Ln:4 FUNC CAST -2015-03-12 diff --git a/hplsql/src/test/results/local/cast2.out.txt b/hplsql/src/test/results/local/cast2.out.txt deleted file mode 100644 index fc136b8f3f20..000000000000 --- a/hplsql/src/test/results/local/cast2.out.txt +++ /dev/null @@ -1,15 +0,0 @@ -Ln:1 SET temp_int = 1 -Ln:2 PRINT -1 -Ln:3 SET temp_float = 1.2 -Ln:4 PRINT -1.2 -Ln:5 SET temp_double = 1.2 -Ln:6 PRINT -1.2 -Ln:7 SET temp_decimal = 1.2 -Ln:8 PRINT -1.2 -Ln:9 SET temp_string = '1.2' -Ln:10 PRINT -1.2 diff --git a/hplsql/src/test/results/local/coalesce.out.txt b/hplsql/src/test/results/local/coalesce.out.txt deleted file mode 100644 index a111c8575fba..000000000000 --- a/hplsql/src/test/results/local/coalesce.out.txt +++ /dev/null @@ -1,4 +0,0 @@ -First non-null -First non-null -First non-null -First non-null diff --git a/hplsql/src/test/results/local/date.out.txt b/hplsql/src/test/results/local/date.out.txt index 118bd29a8c2a..1e4435a3a244 100644 --- a/hplsql/src/test/results/local/date.out.txt +++ b/hplsql/src/test/results/local/date.out.txt @@ -1,4 +1,3 @@ -2014-12-20 -2015-03-12 -2015-03-12 -2015-03-12 +DATE '2014-12-20' +DATE '2015-03-12' +DATE '2015-03-12' diff --git a/hplsql/src/test/results/local/declare.out.txt b/hplsql/src/test/results/local/declare.out.txt index 2b6a6d7d6718..cb970ea048a7 100644 --- a/hplsql/src/test/results/local/declare.out.txt +++ b/hplsql/src/test/results/local/declare.out.txt @@ -9,5 +9,5 @@ Ln:12 DECLARE status INT = 1 Ln:12 DECLARE status2 INT = 1 Ln:13 DECLARE count SMALLINT Ln:13 DECLARE limit INT = 100 -Ln:15 DECLARE dt DATE = 2015-05-13 -Ln:16 DECLARE ts TIMESTAMP = 2015-05-13 11:10:01 +Ln:15 DECLARE dt DATE = DATE '2015-05-13' +Ln:16 DECLARE ts TIMESTAMP = TIMESTAMP '2015-05-13 11:10:01' diff --git a/hplsql/src/test/results/local/expr.out.txt b/hplsql/src/test/results/local/expr.out.txt index 377f8d14f453..1ae8e373bcda 100644 --- a/hplsql/src/test/results/local/expr.out.txt +++ b/hplsql/src/test/results/local/expr.out.txt @@ -23,7 +23,3 @@ Ln:16 SET c = 3 Ln:17 SET c = 2 Ln:18 PRINT 2 -Ln:20 PRINT -Correct -Ln:21 PRINT -Correct diff --git a/hplsql/src/test/results/local/instr.out.txt b/hplsql/src/test/results/local/instr.out.txt deleted file mode 100644 index 9a23e12ae6b4..000000000000 --- a/hplsql/src/test/results/local/instr.out.txt +++ /dev/null @@ -1,33 +0,0 @@ -Ln:1 IF -Ln:1 IF TRUE executed -Ln:2 PRINT -Correct -Ln:7 IF -Ln:7 IF TRUE executed -Ln:8 PRINT -Correct -Ln:13 IF -Ln:13 IF TRUE executed -Ln:14 PRINT -Correct -Ln:19 IF -Ln:19 IF TRUE executed -Ln:20 PRINT -Correct -Ln:25 IF -Ln:25 IF TRUE executed -Ln:26 PRINT -Correct -Ln:31 DECLARE c STRING -Ln:33 IF -Ln:33 IF TRUE executed -Ln:34 PRINT -Correct -Ln:39 IF -Ln:39 IF TRUE executed -Ln:40 PRINT -Correct -Ln:45 IF -Ln:45 IF TRUE executed -Ln:46 PRINT -Correct diff --git a/hplsql/src/test/results/local/interval.out.txt b/hplsql/src/test/results/local/interval.out.txt index d73d95a49615..3e14a4b45245 100644 --- a/hplsql/src/test/results/local/interval.out.txt +++ b/hplsql/src/test/results/local/interval.out.txt @@ -1,12 +1,12 @@ -2015-03-13 -2015-03-13 00:00:00 -2015-03-12 10:10:10.001 -2015-03-15 -2015-03-15 00:00:00 -2015-03-11 -2015-03-11 00:00:00 -2015-03-12 10:10:09.999 -2015-03-09 -2015-03-09 00:00:00 
-2015-03-10 23:59:59 -2016-01-24 +DATE '2015-03-13' +TIMESTAMP '2015-03-13 00:00:00' +TIMESTAMP '2015-03-12 10:10:10.001' +DATE '2015-03-15' +TIMESTAMP '2015-03-15 00:00:00' +DATE '2015-03-11' +TIMESTAMP '2015-03-11 00:00:00' +TIMESTAMP '2015-03-12 10:10:09.999' +DATE '2015-03-09' +TIMESTAMP '2015-03-09 00:00:00' +TIMESTAMP '2015-03-10 23:59:59' +DATE '2016-01-24' diff --git a/hplsql/src/test/results/local/invalid_syntax.out.txt b/hplsql/src/test/results/local/invalid_syntax.out.txt index 56206511ecb8..26eba40e305f 100644 --- a/hplsql/src/test/results/local/invalid_syntax.out.txt +++ b/hplsql/src/test/results/local/invalid_syntax.out.txt @@ -1,3 +1,3 @@ Syntax error at line 1:27 no viable alternative at input 'b,' -Syntax error at line 1:27 mismatched input ',' expecting {, '@', '#', '!', T_ACTION, T_ADD2, T_ALL, T_ALLOCATE, T_ALTER, T_AND, T_ANSI_NULLS, T_ANSI_PADDING, T_AS, T_ASC, T_ASSOCIATE, T_AT, T_AUTO_INCREMENT, T_AVG, T_BATCHSIZE, T_BEGIN, T_BETWEEN, T_BIGINT, T_BINARY_DOUBLE, T_BINARY_FLOAT, T_BIT, T_BODY, T_BREAK, T_BULK, T_BY, T_BYTE, T_CALL, T_CALLER, T_CASCADE, T_CASE, T_CASESPECIFIC, T_CAST, T_CHAR, T_CHARACTER, T_CHARSET, T_CLIENT, T_CLOSE, T_CLUSTERED, T_CMP, T_COLLECT, T_COLLECTION, T_COLUMN, T_COMMENT, T_CONSTANT, T_COMMIT, T_COMPRESS, T_CONCAT, T_CONDITION, T_CONSTRAINT, T_CONTINUE, T_COPY, T_COUNT, T_COUNT_BIG, T_CREATE, T_CREATION, T_CREATOR, T_CS, T_CURRENT, T_CURRENT_SCHEMA, T_CURSOR, T_DATABASE, T_DATA, T_DATE, T_DATETIME, T_DAY, T_DAYS, T_DEC, T_DECIMAL, T_DECLARE, T_DEFAULT, T_DEFERRED, T_DEFINED, T_DEFINER, T_DEFINITION, T_DELETE, T_DELIMITED, T_DELIMITER, T_DESC, T_DESCRIBE, T_DIAGNOSTICS, T_DIR, T_DIRECTORY, T_DISTINCT, T_DISTRIBUTE, T_DO, T_DOUBLE, T_DROP, T_DYNAMIC, T_ENABLE, T_END, T_ENGINE, T_ESCAPED, T_EXCEPT, T_EXEC, T_EXECUTE, T_EXCEPTION, T_EXCLUSIVE, T_EXISTS, T_EXIT, T_FALLBACK, T_FALSE, T_FETCH, T_FIELDS, T_FILE, T_FILES, T_FLOAT, T_FOR, T_FOREIGN, T_FORMAT, T_FOUND, T_FROM, T_FULL, T_FUNCTION, T_GET, T_GLOBAL, T_GO, T_GRANT, T_GROUP, T_HANDLER, T_HASH, T_HAVING, T_HDFS, T_HIVE, T_HOST, T_IDENTITY, T_IF, T_IGNORE, T_IMMEDIATE, T_IN, T_INCLUDE, T_INDEX, T_INITRANS, T_INNER, T_INOUT, T_INSERT, T_INT, T_INT2, T_INT4, T_INT8, T_INTEGER, T_INTERSECT, T_INTERVAL, T_INTO, T_INVOKER, T_IS, T_ISOPEN, T_ITEMS, T_JOIN, T_KEEP, T_KEY, T_KEYS, T_LANGUAGE, T_LEAVE, T_LEFT, T_LIKE, T_LIMIT, T_LINES, T_LOCAL, T_LOCATION, T_LOCATOR, T_LOCATORS, T_LOCKS, T_LOG, T_LOGGED, T_LOGGING, T_LOOP, T_MAP, T_MATCHED, T_MAX, T_MAXTRANS, T_MERGE, T_MESSAGE_TEXT, T_MICROSECOND, T_MICROSECONDS, T_MIN, T_MULTISET, T_NCHAR, T_NEW, T_NVARCHAR, T_NO, T_NOCOUNT, T_NOCOMPRESS, T_NOLOGGING, T_NONE, T_NOT, T_NOTFOUND, T_NULL, T_NUMERIC, T_NUMBER, T_OBJECT, T_OFF, T_ON, T_ONLY, T_OPEN, T_OR, T_ORDER, T_OUT, T_OUTER, T_OVER, T_OVERWRITE, T_OWNER, T_PACKAGE, T_PARTITION, T_PCTFREE, T_PCTUSED, T_PRECISION, T_PRESERVE, T_PRIMARY, T_PRINT, T_PROC, T_PROCEDURE, T_QUALIFY, T_QUERY_BAND, T_QUIT, T_QUOTED_IDENTIFIER, T_RAISE, T_REAL, T_REFERENCES, T_REGEXP, T_REPLACE, T_RESIGNAL, T_RESTRICT, T_RESULT, T_RESULT_SET_LOCATOR, T_RETURN, T_RETURNS, T_REVERSE, T_RIGHT, T_RLIKE, T_ROLE, T_ROLLBACK, T_ROW, T_ROWS, T_ROW_COUNT, T_RR, T_RS, T_PWD, T_TRIM, T_SCHEMA, T_SECOND, T_SECONDS, T_SECURITY, T_SEGMENT, T_SEL, T_SELECT, T_SET, T_SESSION, T_SESSIONS, T_SETS, T_SHARE, T_SIGNAL, T_SIMPLE_DOUBLE, T_SIMPLE_FLOAT, T_SMALLDATETIME, T_SMALLINT, T_SQL, T_SQLEXCEPTION, T_SQLINSERT, T_SQLSTATE, T_SQLWARNING, T_STATS, T_STATISTICS, T_STEP, T_STORAGE, T_STORED, T_STRING, T_SUBDIR, T_SUBSTRING, T_SUM, T_SUMMARY, 
T_SYS_REFCURSOR, T_TABLE, T_TABLESPACE, T_TEMPORARY, T_TERMINATED, T_TEXTIMAGE_ON, T_THEN, T_TIMESTAMP, T_TITLE, T_TO, T_TOP, T_TRANSACTION, T_TRUE, T_TRUNCATE, T_TYPE, T_UNIQUE, T_UPDATE, T_UR, T_USE, T_USING, T_VALUE, T_VALUES, T_VAR, T_VARCHAR, T_VARCHAR2, T_VARYING, T_VOLATILE, T_WHILE, T_WITH, T_WITHOUT, T_WORK, T_XACT_ABORT, T_XML, T_YES, T_ACTIVITY_COUNT, T_CUME_DIST, T_CURRENT_DATE, T_CURRENT_TIMESTAMP, T_CURRENT_USER, T_DENSE_RANK, T_FIRST_VALUE, T_LAG, T_LAST_VALUE, T_LEAD, T_MAX_PART_STRING, T_MIN_PART_STRING, T_MAX_PART_INT, T_MIN_PART_INT, T_MAX_PART_DATE, T_MIN_PART_DATE, T_PART_COUNT, T_PART_LOC, T_RANK, T_ROW_NUMBER, T_STDEV, T_SYSDATE, T_VARIANCE, T_USER, '+', '/', '.', '<', '*', '(', ';', '-', L_ID, L_S_STRING, L_D_STRING, L_INT, L_DEC, L_LABEL} +Syntax error at line 1:27 mismatched input ',' expecting {, '@', '#', '!', T_ACTION, T_ADD2, T_ALL, T_ALLOCATE, T_ALTER, T_AND, T_ANSI_NULLS, T_ANSI_PADDING, T_AS, T_ASC, T_ASSOCIATE, T_AT, T_AUTO_INCREMENT, T_AVG, T_BATCHSIZE, T_BEGIN, T_BETWEEN, T_BIGINT, T_BINARY_DOUBLE, T_BINARY_FLOAT, T_BIT, T_BODY, T_BREAK, T_BULK, T_BY, T_BYTE, T_CALL, T_CALLER, T_CASCADE, T_CASE, T_CASESPECIFIC, T_CAST, T_CHAR, T_CHARACTER, T_CHARSET, T_CLIENT, T_CLOSE, T_CLUSTERED, T_CMP, T_COLLECT, T_COLLECTION, T_COLUMN, T_COMMENT, T_CONSTANT, T_COMMIT, T_COMPRESS, T_CONCAT, T_CONDITION, T_CONSTRAINT, T_CONTINUE, T_COPY, T_COUNT, T_COUNT_BIG, T_CREATE, T_CREATION, T_CREATOR, T_CS, T_CURRENT, T_CURRENT_SCHEMA, T_CURSOR, T_DATABASE, T_DATA, T_DATE, T_DATETIME, T_DAY, T_DAYS, T_DEC, T_DECIMAL, T_DECLARE, T_DEFAULT, T_DEFERRED, T_DEFINED, T_DEFINER, T_DEFINITION, T_DELETE, T_DELIMITED, T_DELIMITER, T_DESC, T_DESCRIBE, T_DIAGNOSTICS, T_DIR, T_DIRECTORY, T_DISTINCT, T_DISTRIBUTE, T_DO, T_DOUBLE, T_DROP, T_DYNAMIC, T_ENABLE, T_END, T_ENGINE, T_ESCAPED, T_EXCEPT, T_EXEC, T_EXECUTE, T_EXCEPTION, T_EXCLUSIVE, T_EXISTS, T_EXIT, T_FALLBACK, T_FALSE, T_FETCH, T_FIELDS, T_FILE, T_FILES, T_FLOAT, T_FOR, T_FOREIGN, T_FORMAT, T_FOUND, T_FROM, T_FULL, T_FUNCTION, T_GET, T_GLOBAL, T_GO, T_GRANT, T_GROUP, T_HANDLER, T_HASH, T_HAVING, T_HDFS, T_HIVE, T_HOST, T_IDENTITY, T_IF, T_IGNORE, T_IMMEDIATE, T_IN, T_INCLUDE, T_INDEX, T_INITRANS, T_INNER, T_INOUT, T_INSERT, T_INT, T_INT2, T_INT4, T_INT8, T_INTEGER, T_INTERSECT, T_INTERVAL, T_INTO, T_INVOKER, T_IS, T_ISOPEN, T_ITEMS, T_JOIN, T_KEEP, T_KEY, T_KEYS, T_LANGUAGE, T_LEAVE, T_LEFT, T_LIKE, T_LIMIT, T_LINES, T_LOCAL, T_LOCATION, T_LOCATOR, T_LOCATORS, T_LOCKS, T_LOG, T_LOGGED, T_LOGGING, T_LOOP, T_MAP, T_MATCHED, T_MAX, T_MAXTRANS, T_MERGE, T_MESSAGE_TEXT, T_MICROSECOND, T_MICROSECONDS, T_MIN, T_MULTISET, T_NCHAR, T_NEW, T_NVARCHAR, T_NO, T_NOCOUNT, T_NOCOMPRESS, T_NOLOGGING, T_NONE, T_NOT, T_NOTFOUND, T_NULL, T_NUMERIC, T_NUMBER, T_OBJECT, T_OFF, T_ON, T_ONLY, T_OPEN, T_OR, T_ORDER, T_OUT, T_OUTER, T_OVER, T_OVERWRITE, T_OWNER, T_PACKAGE, T_PARTITION, T_PCTFREE, T_PCTUSED, T_PRECISION, T_PRESERVE, T_PRIMARY, T_PRINT, T_PROC, T_PROCEDURE, T_QUALIFY, T_QUERY_BAND, T_QUIT, T_QUOTED_IDENTIFIER, T_RAISE, T_REAL, T_REFERENCES, T_REGEXP, T_REPLACE, T_RESIGNAL, T_RESTRICT, T_RESULT, T_RESULT_SET_LOCATOR, T_RETURN, T_RETURNS, T_REVERSE, T_RIGHT, T_RLIKE, T_ROLE, T_ROLLBACK, T_ROW, T_ROWS, T_ROW_COUNT, T_RR, T_RS, T_PWD, T_TRIM, T_SCHEMA, T_SECOND, T_SECONDS, T_SECURITY, T_SEGMENT, T_SEL, T_SELECT, T_SET, T_SESSION, T_SESSIONS, T_SETS, T_SHARE, T_SIGNAL, T_SIMPLE_DOUBLE, T_SIMPLE_FLOAT, T_SMALLDATETIME, T_SMALLINT, T_SQL, T_SQLEXCEPTION, T_SQLINSERT, T_SQLSTATE, T_SQLWARNING, T_STATS, T_STATISTICS, T_STEP, T_STORAGE, T_STORED, 
T_STRING, T_SUBDIR, T_SUBSTRING, T_SUM, T_SUMMARY, T_SYS_REFCURSOR, T_TABLE, T_TABLESPACE, T_TEMPORARY, T_TERMINATED, T_TEXTIMAGE_ON, T_THEN, T_TIMESTAMP, T_TITLE, T_TO, T_TOP, T_TRANSACTION, T_TRUE, T_TRUNCATE, T_TYPE, T_UNIQUE, T_UPDATE, T_UR, T_USE, T_USING, T_VALUE, T_VALUES, T_VAR, T_VARCHAR, T_VARCHAR2, T_VARYING, T_VOLATILE, T_WHILE, T_WITH, T_WITHOUT, T_WORK, T_XACT_ABORT, T_XML, T_YES, T_ACTIVITY_COUNT, T_CUME_DIST, T_CURRENT_DATE, T_CURRENT_TIME_MILLIS, T_CURRENT_TIMESTAMP, T_CURRENT_USER, T_DENSE_RANK, T_FIRST_VALUE, T_LAG, T_LAST_VALUE, T_LEAD, T_MAX_PART_STRING, T_MIN_PART_STRING, T_MAX_PART_INT, T_MIN_PART_INT, T_MAX_PART_DATE, T_MIN_PART_DATE, T_PART_COUNT, T_PART_LOC, T_RANK, T_ROW_NUMBER, T_STDEV, T_SYSDATE, T_VARIANCE, T_USER, '+', '/', '.', '<', '*', '(', ';', '-', L_ID, L_S_STRING, L_D_STRING, L_INT, L_DEC, L_LABEL} Ln:1 identifier 'CREATE' must be declared. diff --git a/hplsql/src/test/results/local/length.out.txt b/hplsql/src/test/results/local/length.out.txt deleted file mode 100644 index b8626c4cff28..000000000000 --- a/hplsql/src/test/results/local/length.out.txt +++ /dev/null @@ -1 +0,0 @@ -4 diff --git a/hplsql/src/test/results/local/lower.out.txt b/hplsql/src/test/results/local/lower.out.txt deleted file mode 100644 index 8baef1b4abc4..000000000000 --- a/hplsql/src/test/results/local/lower.out.txt +++ /dev/null @@ -1 +0,0 @@ -abc diff --git a/hplsql/src/test/results/local/nvl.out.txt b/hplsql/src/test/results/local/nvl.out.txt deleted file mode 100644 index a111c8575fba..000000000000 --- a/hplsql/src/test/results/local/nvl.out.txt +++ /dev/null @@ -1,4 +0,0 @@ -First non-null -First non-null -First non-null -First non-null diff --git a/hplsql/src/test/results/local/replace.out.txt b/hplsql/src/test/results/local/replace.out.txt deleted file mode 100644 index 2cd3602ff952..000000000000 --- a/hplsql/src/test/results/local/replace.out.txt +++ /dev/null @@ -1 +0,0 @@ -20160303 diff --git a/hplsql/src/test/results/local/sub.out.txt b/hplsql/src/test/results/local/sub.out.txt index d883fcd7d786..8d3b91922fb1 100644 --- a/hplsql/src/test/results/local/sub.out.txt +++ b/hplsql/src/test/results/local/sub.out.txt @@ -1 +1 @@ -2014-12-31 +DATE '2014-12-31' diff --git a/hplsql/src/test/results/local/timestamp.out.txt b/hplsql/src/test/results/local/timestamp.out.txt deleted file mode 100644 index 223b5f492cb2..000000000000 --- a/hplsql/src/test/results/local/timestamp.out.txt +++ /dev/null @@ -1,4 +0,0 @@ -2015-03-03 11:39:31.123 -2015-03-03 11:39:31.123 -2015-03-03 11:39:31 -2015-03-03 11:39:31.123 diff --git a/hplsql/src/test/results/local/timestamp_iso.out.txt b/hplsql/src/test/results/local/timestamp_iso.out.txt index 997df7fffca7..dc697a127d64 100644 --- a/hplsql/src/test/results/local/timestamp_iso.out.txt +++ b/hplsql/src/test/results/local/timestamp_iso.out.txt @@ -1,2 +1 @@ -2015-03-12 00:00:00 -2015-03-12 00:00:00 +TIMESTAMP '2015-03-12 00:00:00' diff --git a/hplsql/src/test/results/local/to_char.out.txt b/hplsql/src/test/results/local/to_char.out.txt index 22e8ceff87b2..651eb14fee42 100644 --- a/hplsql/src/test/results/local/to_char.out.txt +++ b/hplsql/src/test/results/local/to_char.out.txt @@ -1 +1 @@ -2015-04-02 +DATE '2015-04-02' diff --git a/hplsql/src/test/results/local/to_timestamp.out.txt b/hplsql/src/test/results/local/to_timestamp.out.txt index 1ee727873aab..08884ae0cc53 100644 --- a/hplsql/src/test/results/local/to_timestamp.out.txt +++ b/hplsql/src/test/results/local/to_timestamp.out.txt @@ -1,4 +1,4 @@ -2015-04-02 00:00:00 -2015-04-02 00:00:00 
-2015-04-02 00:00:00 -2015-04-02 13:51:31 +TIMESTAMP '2015-04-02 00:00:00' +TIMESTAMP '2015-04-02 00:00:00' +TIMESTAMP '2015-04-02 00:00:00' +TIMESTAMP '2015-04-02 13:51:31' diff --git a/hplsql/src/test/results/local/trim.out.txt b/hplsql/src/test/results/local/trim.out.txt deleted file mode 100644 index bbf851d14c6c..000000000000 --- a/hplsql/src/test/results/local/trim.out.txt +++ /dev/null @@ -1 +0,0 @@ -#Hello# diff --git a/iceberg/iceberg-catalog/pom.xml b/iceberg/iceberg-catalog/pom.xml index 3587125bcc67..acdcb8fc20ae 100644 --- a/iceberg/iceberg-catalog/pom.xml +++ b/iceberg/iceberg-catalog/pom.xml @@ -3,7 +3,7 @@ org.apache.hive hive-iceberg - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml 4.0.0 diff --git a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/CachedClientPool.java b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/CachedClientPool.java index c93ce5455e9f..90eea7618df6 100644 --- a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/CachedClientPool.java +++ b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/CachedClientPool.java @@ -128,7 +128,7 @@ public R run(Action action, boolean retry) static Key extractKey(String cacheKeys, Configuration conf) { // generate key elements in a certain order, so that the Key instances are comparable List elements = Lists.newArrayList(); - elements.add(conf.get(HiveConf.ConfVars.METASTOREURIS.varname, "")); + elements.add(conf.get(HiveConf.ConfVars.METASTORE_URIS.varname, "")); elements.add(conf.get(HiveCatalog.HIVE_CONF_CATALOG, "hive")); if (cacheKeys == null || cacheKeys.isEmpty()) { return Key.of(elements); diff --git a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveCatalog.java b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveCatalog.java index 6c98cee6a528..de859a508672 100644 --- a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveCatalog.java +++ b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/HiveCatalog.java @@ -95,11 +95,11 @@ public void initialize(String inputName, Map properties) { } if (properties.containsKey(CatalogProperties.URI)) { - this.conf.set(HiveConf.ConfVars.METASTOREURIS.varname, properties.get(CatalogProperties.URI)); + this.conf.set(HiveConf.ConfVars.METASTORE_URIS.varname, properties.get(CatalogProperties.URI)); } if (properties.containsKey(CatalogProperties.WAREHOUSE_LOCATION)) { - this.conf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + this.conf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, LocationUtil.stripTrailingSlash(properties.get(CatalogProperties.WAREHOUSE_LOCATION))); } @@ -489,7 +489,7 @@ protected String defaultWarehouseLocation(TableIdentifier tableIdentifier) { } private String databaseLocation(String databaseName) { - String warehouseLocation = conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname); + String warehouseLocation = conf.get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname); Preconditions.checkNotNull( warehouseLocation, "Warehouse location is not set: hive.metastore.warehouse.dir=null"); warehouseLocation = LocationUtil.stripTrailingSlash(warehouseLocation); @@ -563,7 +563,7 @@ Database convertToDatabase(Namespace namespace, Map meta) { public String toString() { return MoreObjects.toStringHelper(this) .add("name", name) - .add("uri", this.conf == null ? "" : this.conf.get(HiveConf.ConfVars.METASTOREURIS.varname)) + .add("uri", this.conf == null ? 
"" : this.conf.get(HiveConf.ConfVars.METASTORE_URIS.varname)) .toString(); } diff --git a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/MetastoreLock.java b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/MetastoreLock.java index f25a35ab7830..c0d9d88ee9fc 100644 --- a/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/MetastoreLock.java +++ b/iceberg/iceberg-catalog/src/main/java/org/apache/iceberg/hive/MetastoreLock.java @@ -126,7 +126,7 @@ public MetastoreLock(Configuration conf, ClientPool server.serve()); // in Hive3, setting this as a system prop ensures that it will be picked up whenever a new HiveConf is created - System.setProperty(HiveConf.ConfVars.METASTOREURIS.varname, hiveConf.getVar(HiveConf.ConfVars.METASTOREURIS)); + System.setProperty(HiveConf.ConfVars.METASTORE_URIS.varname, hiveConf.getVar(HiveConf.ConfVars.METASTORE_URIS)); this.clientPool = new HiveClientPool(1, hiveConf); } catch (Exception e) { @@ -229,7 +229,7 @@ public R run(ClientPool.Action action) thro private TServer newThriftServer(TServerSocket socket, int poolSize, HiveConf conf) throws Exception { HiveConf serverConf = new HiveConf(conf); - serverConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:" + DERBY_PATH + ";create=true"); + serverConf.set(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, "jdbc:derby:" + DERBY_PATH + ";create=true"); baseHandler = HMS_HANDLER_CTOR.newInstance("new db based metaserver", serverConf); IHMSHandler handler = GET_BASE_HMS_HANDLER.invoke(serverConf, baseHandler, false); @@ -244,8 +244,8 @@ private TServer newThriftServer(TServerSocket socket, int poolSize, HiveConf con } private void initConf(HiveConf conf, int port) { - conf.set(HiveConf.ConfVars.METASTOREURIS.varname, "thrift://localhost:" + port); - conf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, "file:" + HIVE_WAREHOUSE_DIR.getAbsolutePath()); + conf.set(HiveConf.ConfVars.METASTORE_URIS.varname, "thrift://localhost:" + port); + conf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, "file:" + HIVE_WAREHOUSE_DIR.getAbsolutePath()); conf.set(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL.varname, "file:" + HIVE_EXTERNAL_WAREHOUSE_DIR.getAbsolutePath()); conf.set(HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL.varname, "false"); diff --git a/iceberg/iceberg-handler/pom.xml b/iceberg/iceberg-handler/pom.xml index 397898d2c61b..b639df56eb89 100644 --- a/iceberg/iceberg-handler/pom.xml +++ b/iceberg/iceberg-handler/pom.xml @@ -3,7 +3,7 @@ org.apache.hive hive-iceberg - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml 4.0.0 diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/FilesForCommit.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/FilesForCommit.java index 1bc5ea3a6741..2e25f5a8c2e6 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/FilesForCommit.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/FilesForCommit.java @@ -33,29 +33,37 @@ public class FilesForCommit implements Serializable { private final Collection dataFiles; private final Collection deleteFiles; - private Collection referencedDataFiles; + private final Collection replacedDataFiles; + private final Collection referencedDataFiles; public FilesForCommit(Collection dataFiles, Collection deleteFiles) { this(dataFiles, deleteFiles, Collections.emptyList()); } public FilesForCommit(Collection dataFiles, Collection deleteFiles, - Collection referencedDataFiles) { + Collection 
replacedDataFiles, Collection referencedDataFiles) { this.dataFiles = dataFiles; this.deleteFiles = deleteFiles; + this.replacedDataFiles = replacedDataFiles; this.referencedDataFiles = referencedDataFiles; } - public static FilesForCommit onlyDelete(Collection deleteFiles) { - return new FilesForCommit(Collections.emptyList(), deleteFiles); + public FilesForCommit(Collection dataFiles, Collection deleteFiles, + Collection replacedDataFiles) { + this(dataFiles, deleteFiles, replacedDataFiles, Collections.emptySet()); + } + + public static FilesForCommit onlyDelete(Collection deleteFiles, + Collection referencedDataFiles) { + return new FilesForCommit(Collections.emptyList(), deleteFiles, Collections.emptyList(), referencedDataFiles); } public static FilesForCommit onlyData(Collection dataFiles) { return new FilesForCommit(dataFiles, Collections.emptyList()); } - public static FilesForCommit onlyData(Collection dataFiles, Collection referencedDataFiles) { - return new FilesForCommit(dataFiles, Collections.emptyList(), referencedDataFiles); + public static FilesForCommit onlyData(Collection dataFiles, Collection replacedDataFiles) { + return new FilesForCommit(dataFiles, Collections.emptyList(), replacedDataFiles); } public static FilesForCommit empty() { @@ -70,7 +78,11 @@ public Collection deleteFiles() { return deleteFiles; } - public Collection referencedDataFiles() { + public Collection replacedDataFiles() { + return replacedDataFiles; + } + + public Collection referencedDataFiles() { return referencedDataFiles; } @@ -79,7 +91,7 @@ public Collection allFiles() { } public boolean isEmpty() { - return dataFiles.isEmpty() && deleteFiles.isEmpty() && referencedDataFiles.isEmpty(); + return dataFiles.isEmpty() && deleteFiles.isEmpty() && replacedDataFiles.isEmpty(); } @Override @@ -87,6 +99,7 @@ public String toString() { return MoreObjects.toStringHelper(this) .add("dataFiles", dataFiles.toString()) .add("deleteFiles", deleteFiles.toString()) + .add("replacedDataFiles", replacedDataFiles.toString()) .add("referencedDataFiles", referencedDataFiles.toString()) .toString(); } diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergFilterFactory.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergFilterFactory.java index d443672b0e8d..4db6627cbc9c 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergFilterFactory.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergFilterFactory.java @@ -110,9 +110,6 @@ private static Expression translateLeaf(PredicateLeaf leaf) { return in(column, leafToLiteralList(leaf)); case BETWEEN: List icebergLiterals = leafToLiteralList(leaf); - if (icebergLiterals.size() < 2) { - throw new UnsupportedOperationException("Missing leaf literals: " + leaf); - } if (icebergLiterals.size() == 2) { return and(greaterThanOrEqual(column, icebergLiterals.get(0)), lessThanOrEqual(column, icebergLiterals.get(1))); diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java index 540a3b8638c0..94aabe65d433 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergMetaHook.java @@ -43,9 +43,11 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import 
org.apache.hadoop.hive.metastore.PartitionDropOptions; import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.api.CreateTableRequest; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; @@ -122,6 +124,7 @@ import org.apache.iceberg.relocated.com.google.common.collect.Sets; import org.apache.iceberg.types.Conversions; import org.apache.iceberg.types.Type; +import org.apache.iceberg.types.Types; import org.apache.iceberg.util.Pair; import org.apache.iceberg.util.StructProjection; import org.apache.thrift.TException; @@ -148,7 +151,8 @@ public class HiveIcebergMetaHook implements HiveMetaHook { AlterTableType.ADDCOLS, AlterTableType.REPLACE_COLUMNS, AlterTableType.RENAME_COLUMN, AlterTableType.ADDPROPS, AlterTableType.DROPPROPS, AlterTableType.SETPARTITIONSPEC, AlterTableType.UPDATE_COLUMNS, AlterTableType.RENAME, AlterTableType.EXECUTE, AlterTableType.CREATE_BRANCH, - AlterTableType.CREATE_TAG, AlterTableType.DROP_BRANCH, AlterTableType.DROPPARTITION, AlterTableType.DROP_TAG); + AlterTableType.CREATE_TAG, AlterTableType.DROP_BRANCH, AlterTableType.DROPPARTITION, AlterTableType.DROP_TAG, + AlterTableType.COMPACT); private static final List MIGRATION_ALLOWED_SOURCE_FORMATS = ImmutableList.of( FileFormat.PARQUET.name().toLowerCase(), FileFormat.ORC.name().toLowerCase(), @@ -193,6 +197,12 @@ public HiveIcebergMetaHook(Configuration conf) { @Override public void preCreateTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) { + CreateTableRequest request = new CreateTableRequest(hmsTable); + preCreateTable(request); + } + @Override + public void preCreateTable(CreateTableRequest request) { + org.apache.hadoop.hive.metastore.api.Table hmsTable = request.getTable(); if (hmsTable.isTemporary()) { throw new UnsupportedOperationException("Creation of temporary iceberg tables is not supported."); } @@ -233,7 +243,12 @@ public void preCreateTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) // - InputFormatConfig.TABLE_SCHEMA, InputFormatConfig.PARTITION_SPEC takes precedence so the user can override the // Iceberg schema and specification generated by the code - Schema schema = schema(catalogProperties, hmsTable); + Set identifierFields = Optional.ofNullable(request.getPrimaryKeys()) + .map(primaryKeys -> primaryKeys.stream() + .map(SQLPrimaryKey::getColumn_name) + .collect(Collectors.toSet())) + .orElse(Collections.emptySet()); + Schema schema = schema(catalogProperties, hmsTable, identifierFields); PartitionSpec spec = spec(conf, schema, hmsTable); // If there are partition keys specified remove them from the HMS table and add them to the column list @@ -254,6 +269,8 @@ public void preCreateTable(org.apache.hadoop.hive.metastore.api.Table hmsTable) // Set whether the format is ORC, to be used during vectorization. setOrcOnlyFilesParam(hmsTable); + // Remove hive primary key columns from table request, as iceberg doesn't support hive primary key. 
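As a quick illustration of what the primary-key handling above amounts to, here is a minimal, self-contained sketch; the column names and field ids are invented for the example, and the stream pipeline mirrors the getSchemaWithIdentifierFields logic introduced further down in this file.

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.iceberg.Schema;
import org.apache.iceberg.types.Types;

public class IdentifierFieldSketch {
  public static void main(String[] args) {
    // Schema as converted from the HMS columns (fields are optional by default).
    Schema schema = new Schema(
        Types.NestedField.optional(1, "id", Types.LongType.get()),
        Types.NestedField.optional(2, "name", Types.StringType.get()));

    // Column names taken from the SQLPrimaryKey entries of the CreateTableRequest.
    Set<String> identifierFields = Set.of("id");

    // Resolve names to field ids and mark those columns required, as the hook does.
    Set<Integer> identifierFieldIds = identifierFields.stream()
        .map(name -> schema.findField(name).fieldId())
        .collect(Collectors.toSet());
    List<Types.NestedField> cols = schema.columns().stream()
        .map(col -> identifierFieldIds.contains(col.fieldId()) ? col.asRequired() : col)
        .collect(Collectors.toList());

    Schema withIds = new Schema(cols, identifierFieldIds);
    System.out.println(withIds.identifierFieldIds()); // [1]
  }
}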
+ request.setPrimaryKeys(null); } @Override @@ -383,7 +400,7 @@ private void doPreAlterTable(org.apache.hadoop.hive.metastore.api.Table hmsTable preAlterTableProperties = new PreAlterTableProperties(); preAlterTableProperties.tableLocation = sd.getLocation(); preAlterTableProperties.format = sd.getInputFormat(); - preAlterTableProperties.schema = schema(catalogProperties, hmsTable); + preAlterTableProperties.schema = schema(catalogProperties, hmsTable, Collections.emptySet()); preAlterTableProperties.partitionKeys = hmsTable.getPartitionKeys(); context.getProperties().put(HiveMetaHook.ALLOW_PARTITION_KEY_CHANGE, "true"); @@ -793,19 +810,40 @@ private static Properties getCatalogProperties(org.apache.hadoop.hive.metastore. return properties; } - private Schema schema(Properties properties, org.apache.hadoop.hive.metastore.api.Table hmsTable) { + private Schema schema(Properties properties, org.apache.hadoop.hive.metastore.api.Table hmsTable, + Set identifierFields) { boolean autoConversion = conf.getBoolean(InputFormatConfig.SCHEMA_AUTO_CONVERSION, false); if (properties.getProperty(InputFormatConfig.TABLE_SCHEMA) != null) { return SchemaParser.fromJson(properties.getProperty(InputFormatConfig.TABLE_SCHEMA)); - } else if (hmsTable.isSetPartitionKeys() && !hmsTable.getPartitionKeys().isEmpty()) { - // Add partitioning columns to the original column list before creating the Iceberg Schema - List cols = Lists.newArrayList(hmsTable.getSd().getCols()); + } + List cols = Lists.newArrayList(hmsTable.getSd().getCols()); + if (hmsTable.isSetPartitionKeys() && !hmsTable.getPartitionKeys().isEmpty()) { cols.addAll(hmsTable.getPartitionKeys()); - return HiveSchemaUtil.convert(cols, autoConversion); - } else { - return HiveSchemaUtil.convert(hmsTable.getSd().getCols(), autoConversion); } + Schema schema = HiveSchemaUtil.convert(cols, autoConversion); + + return getSchemaWithIdentifierFields(schema, identifierFields); + } + + private Schema getSchemaWithIdentifierFields(Schema schema, Set identifierFields) { + if (identifierFields == null || identifierFields.isEmpty()) { + return schema; + } + Set identifierFieldIds = identifierFields.stream() + .map(column -> { + Types.NestedField field = schema.findField(column); + Preconditions.checkNotNull(field, + "Cannot find identifier field ID for the column %s in schema %s", column, schema); + return field.fieldId(); + }) + .collect(Collectors.toSet()); + + List cols = schema.columns().stream() + .map(column -> identifierFieldIds.contains(column.fieldId()) ? 
column.asRequired() : column) + .collect(Collectors.toList()); + + return new Schema(cols, identifierFieldIds); } private static PartitionSpec spec(Configuration configuration, Schema schema, diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputCommitter.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputCommitter.java index 37941c62761a..7f4b9e12c3a2 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputCommitter.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputCommitter.java @@ -42,7 +42,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.Context.Operation; +import org.apache.hadoop.hive.ql.Context.RewritePolicy; import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.hive.ql.session.SessionStateUtil; import org.apache.hadoop.mapred.JobConf; @@ -62,6 +64,7 @@ import org.apache.iceberg.Snapshot; import org.apache.iceberg.SnapshotRef; import org.apache.iceberg.Table; +import org.apache.iceberg.Transaction; import org.apache.iceberg.exceptions.NotFoundException; import org.apache.iceberg.expressions.Expressions; import org.apache.iceberg.hadoop.Util; @@ -76,6 +79,7 @@ import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; import org.apache.iceberg.relocated.com.google.common.collect.Lists; +import org.apache.iceberg.relocated.com.google.common.collect.Sets; import org.apache.iceberg.relocated.com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.iceberg.util.Tasks; import org.slf4j.Logger; @@ -139,16 +143,18 @@ public void commitTask(TaskAttemptContext originalContext) throws IOException { String fileForCommitLocation = generateFileForCommitLocation(table.location(), jobConf, attemptID.getJobID(), attemptID.getTaskID().getId()); if (writers.get(output) != null) { - Collection dataFiles = Lists.newArrayList(); - Collection deleteFiles = Lists.newArrayList(); - Collection referencedDataFiles = Lists.newArrayList(); + List dataFiles = Lists.newArrayList(); + List deleteFiles = Lists.newArrayList(); + List replacedDataFiles = Lists.newArrayList(); + Set referencedDataFiles = Sets.newHashSet(); for (HiveIcebergWriter writer : writers.get(output)) { FilesForCommit files = writer.files(); dataFiles.addAll(files.dataFiles()); deleteFiles.addAll(files.deleteFiles()); + replacedDataFiles.addAll(files.replacedDataFiles()); referencedDataFiles.addAll(files.referencedDataFiles()); } - createFileForCommit(new FilesForCommit(dataFiles, deleteFiles, referencedDataFiles), + createFileForCommit(new FilesForCommit(dataFiles, deleteFiles, replacedDataFiles, referencedDataFiles), fileForCommitLocation, table.io()); } else { LOG.info("CommitTask found no writer for specific table: {}, attemptID: {}", output, attemptID); @@ -409,7 +415,8 @@ private void commitTable(FileIO io, ExecutorService executor, OutputTable output } List dataFiles = Lists.newArrayList(); List deleteFiles = Lists.newArrayList(); - List referencedDataFiles = Lists.newArrayList(); + List replacedDataFiles = Lists.newArrayList(); + Set referencedDataFiles = Sets.newHashSet(); Table table = null; String branchName = null; @@ -436,10 +443,11 @@ private void commitTable(FileIO io, 
ExecutorService executor, OutputTable output numTasks, executor, outputTable.table.location(), jobContext, io, true); dataFiles.addAll(writeResults.dataFiles()); deleteFiles.addAll(writeResults.deleteFiles()); + replacedDataFiles.addAll(writeResults.replacedDataFiles()); referencedDataFiles.addAll(writeResults.referencedDataFiles()); } - FilesForCommit filesForCommit = new FilesForCommit(dataFiles, deleteFiles, referencedDataFiles); + FilesForCommit filesForCommit = new FilesForCommit(dataFiles, deleteFiles, replacedDataFiles, referencedDataFiles); long startTime = System.currentTimeMillis(); if (Operation.IOW != operation) { @@ -453,7 +461,13 @@ private void commitTable(FileIO io, ExecutorService executor, OutputTable output commitWrite(table, branchName, snapshotId, startTime, filesForCommit, operation); } } else { - commitOverwrite(table, branchName, startTime, filesForCommit); + + RewritePolicy rewritePolicy = RewritePolicy.fromString(outputTable.jobContexts.stream() + .findAny() + .map(x -> x.getJobConf().get(ConfVars.REWRITE_POLICY.varname)) + .orElse(RewritePolicy.DEFAULT.name())); + + commitOverwrite(table, branchName, startTime, filesForCommit, rewritePolicy); } } @@ -476,9 +490,9 @@ private Long getSnapshotId(Table table, String branchName) { private void commitWrite(Table table, String branchName, Long snapshotId, long startTime, FilesForCommit results, Operation operation) { - if (!results.referencedDataFiles().isEmpty()) { + if (!results.replacedDataFiles().isEmpty()) { OverwriteFiles write = table.newOverwrite(); - results.referencedDataFiles().forEach(write::deleteFile); + results.replacedDataFiles().forEach(write::deleteFile); results.dataFiles().forEach(write::addFile); if (StringUtils.isNotEmpty(branchName)) { @@ -488,6 +502,7 @@ private void commitWrite(Table table, String branchName, Long snapshotId, long s write.validateFromSnapshot(snapshotId); } write.validateNoConflictingData(); + write.validateNoConflictingDeletes(); write.commit(); return; } @@ -514,6 +529,7 @@ private void commitWrite(Table table, String branchName, Long snapshotId, long s write.validateDeletedFiles(); write.validateNoConflictingDeleteFiles(); } + write.validateDataFilesExist(results.referencedDataFiles()); write.validateNoConflictingDataFiles(); write.commit(); } @@ -532,16 +548,25 @@ private void commitWrite(Table table, String branchName, Long snapshotId, long s * @param table The table we are changing * @param startTime The start time of the commit - used only for logging * @param results The object containing the new files + * @param rewritePolicy The rewrite policy to use for the insert overwrite commit */ - private void commitOverwrite(Table table, String branchName, long startTime, FilesForCommit results) { + private void commitOverwrite(Table table, String branchName, long startTime, FilesForCommit results, + RewritePolicy rewritePolicy) { Preconditions.checkArgument(results.deleteFiles().isEmpty(), "Can not handle deletes with overwrite"); if (!results.dataFiles().isEmpty()) { - ReplacePartitions overwrite = table.newReplacePartitions(); + Transaction transaction = table.newTransaction(); + if (rewritePolicy == RewritePolicy.ALL_PARTITIONS) { + DeleteFiles delete = transaction.newDelete(); + delete.deleteFromRowFilter(Expressions.alwaysTrue()); + delete.commit(); + } + ReplacePartitions overwrite = transaction.newReplacePartitions(); results.dataFiles().forEach(overwrite::addFile); if (StringUtils.isNotEmpty(branchName)) { overwrite.toBranch(HiveUtils.getTableSnapshotRef(branchName)); } 
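The ALL_PARTITIONS branch of the new overwrite commit boils down to the sequence below. This is a condensed sketch that assumes the caller supplies the table and the rewritten data files; it leaves out the branch handling shown in the hunk.

import java.util.List;
import org.apache.iceberg.DataFile;
import org.apache.iceberg.DeleteFiles;
import org.apache.iceberg.ReplacePartitions;
import org.apache.iceberg.Table;
import org.apache.iceberg.Transaction;
import org.apache.iceberg.expressions.Expressions;

public final class AllPartitionsOverwriteSketch {
  private AllPartitionsOverwriteSketch() {
  }

  /** Drop every existing row, then register the rewritten files, published together. */
  static void overwriteAllPartitions(Table table, List<DataFile> rewrittenFiles) {
    Transaction transaction = table.newTransaction();

    DeleteFiles delete = transaction.newDelete();
    // Wipe the whole table by row filter, which also covers partition-evolution leftovers.
    delete.deleteFromRowFilter(Expressions.alwaysTrue());
    delete.commit();

    ReplacePartitions overwrite = transaction.newReplacePartitions();
    rewrittenFiles.forEach(overwrite::addFile);
    overwrite.commit();

    // Both pending operations become visible only when the transaction commits.
    transaction.commitTransaction();
  }
}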
overwrite.commit(); + transaction.commitTransaction(); LOG.info("Overwrite commit took {} ms for table: {} with {} file(s)", System.currentTimeMillis() - startTime, table, results.dataFiles().size()); } else if (table.spec().isUnpartitioned()) { @@ -642,7 +667,8 @@ private static FilesForCommit collectResults(int numTasks, ExecutorService execu // starting from 0. Collection dataFiles = new ConcurrentLinkedQueue<>(); Collection deleteFiles = new ConcurrentLinkedQueue<>(); - Collection referencedDataFiles = new ConcurrentLinkedQueue<>(); + Collection replacedDataFiles = new ConcurrentLinkedQueue<>(); + Collection referencedDataFiles = new ConcurrentLinkedQueue<>(); Tasks.range(numTasks) .throwFailureWhenFinished(throwOnFailure) .executeWith(executor) @@ -652,11 +678,11 @@ private static FilesForCommit collectResults(int numTasks, ExecutorService execu FilesForCommit files = readFileForCommit(taskFileName, io); dataFiles.addAll(files.dataFiles()); deleteFiles.addAll(files.deleteFiles()); + replacedDataFiles.addAll(files.replacedDataFiles()); referencedDataFiles.addAll(files.referencedDataFiles()); - }); - return new FilesForCommit(dataFiles, deleteFiles, referencedDataFiles); + return new FilesForCommit(dataFiles, deleteFiles, replacedDataFiles, referencedDataFiles); } /** @@ -669,7 +695,7 @@ private static FilesForCommit collectResults(int numTasks, ExecutorService execu */ @VisibleForTesting static String generateJobLocation(String location, Configuration conf, JobID jobId) { - String queryId = conf.get(HiveConf.ConfVars.HIVEQUERYID.varname); + String queryId = conf.get(HiveConf.ConfVars.HIVE_QUERY_ID.varname); return location + "/temp/" + queryId + "-" + jobId; } diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputFormat.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputFormat.java index 7c625543b780..508e3459269b 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputFormat.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergOutputFormat.java @@ -72,11 +72,12 @@ private static HiveIcebergWriter writer(JobConf jc) { setWriterLevelConfiguration(jc, table); return WriterBuilder.builderFor(table) - .queryId(jc.get(HiveConf.ConfVars.HIVEQUERYID.varname)) + .queryId(jc.get(HiveConf.ConfVars.HIVE_QUERY_ID.varname)) .tableName(tableName) .attemptID(taskAttemptID) .poolSize(poolSize) .operation(HiveCustomStorageHandlerUtils.getWriteOperation(jc, tableName)) + .isFanoutEnabled(!HiveCustomStorageHandlerUtils.getWriteOperationIsSorted(jc, tableName)) .build(); } diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergSerDe.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergSerDe.java index 130b7186221d..548d33f7d93d 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergSerDe.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergSerDe.java @@ -146,8 +146,8 @@ public void initialize(@Nullable Configuration configuration, Properties serDePr // Currently ClusteredWriter is used which requires that records are ordered by partition keys. // Here we ensure that SortedDynPartitionOptimizer will kick in and do the sorting. 
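The comment above explains why records must arrive sorted by partition keys. For reference, a rough sketch of the equivalent session-level settings, assuming the user-facing property keys are unchanged by the ConfVars renames in this patch:

import org.apache.hadoop.hive.conf.HiveConf;

public class DynPartSortSettingsSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    // Threshold 1 forces the SortedDynPartitionOptimizer on for every dynamic-partition insert.
    conf.setInt("hive.optimize.sort.dynamic.partition.threshold", 1);
    // Allow every partition column to be dynamic, which the Iceberg writer relies on.
    conf.set("hive.exec.dynamic.partition.mode", "nonstrict");
    System.out.println(conf.get("hive.optimize.sort.dynamic.partition.threshold"));
  }
}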
// TODO: remove once we have both Fanout and ClusteredWriter available: HIVE-25948 - HiveConf.setIntVar(configuration, HiveConf.ConfVars.HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD, 1); - HiveConf.setVar(configuration, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + HiveConf.setIntVar(configuration, HiveConf.ConfVars.HIVE_OPT_SORT_DYNAMIC_PARTITION_THRESHOLD, 1); + HiveConf.setVar(configuration, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); try { this.inspector = IcebergObjectInspector.create(projectedSchema); } catch (Exception e) { diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java index 5f4f97b9f72b..cd50aa929c08 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java @@ -38,7 +38,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.BiFunction; import java.util.function.Function; import java.util.stream.Collectors; import org.apache.commons.collections.MapUtils; @@ -55,6 +54,7 @@ import org.apache.hadoop.hive.common.type.Timestamp; import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.HiveMetaHook; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; @@ -67,16 +67,17 @@ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.Context.Operation; +import org.apache.hadoop.hive.ql.Context.RewritePolicy; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; +import org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc; import org.apache.hadoop.hive.ql.ddl.table.create.like.CreateTableLikeDesc; import org.apache.hadoop.hive.ql.ddl.table.misc.properties.AlterTableSetPropertiesDesc; import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.exec.FetchOperator; -import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.io.IOConstants; @@ -120,7 +121,6 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapred.InputFormat; @@ -217,26 +217,6 @@ public class HiveIcebergStorageHandler implements HiveStoragePredicateHandler, H public static final String TABLE_DEFAULT_LOCATION = "TABLE_DEFAULT_LOCATION"; - /** - * Function template for producing a custom sort expression function: - * Takes the source column index and the bucket count to creat a function where Iceberg bucket UDF is used 
to build - * the sort expression, e.g. iceberg_bucket(_col2, 5) - */ - private static final transient BiFunction, ExprNodeDesc>> - BUCKET_SORT_EXPR = - (idx, bucket) -> cols -> { - try { - ExprNodeDesc icebergBucketSourceCol = cols.get(idx); - return ExprNodeGenericFuncDesc.newInstance(new GenericUDFIcebergBucket(), "iceberg_bucket", - Lists.newArrayList( - icebergBucketSourceCol, - new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, bucket) - )); - } catch (UDFArgumentException e) { - throw new RuntimeException(e); - } - }; - private static final List ACID_VIRTUAL_COLS = ImmutableList.of(VirtualColumn.PARTITION_SPEC_ID, VirtualColumn.PARTITION_HASH, VirtualColumn.FILE_PATH, VirtualColumn.ROW_POSITION); private static final List ACID_VIRTUAL_COLS_AS_FIELD_SCHEMA = ACID_VIRTUAL_COLS.stream() @@ -278,6 +258,13 @@ public void configureInputJobProperties(TableDesc tableDesc, Map overlayTableProperties(conf, tableDesc, map); // Until the vectorized reader can handle delete files, let's fall back to non-vector mode for V2 tables fallbackToNonVectorizedModeBasedOnProperties(tableDesc.getProperties()); + + boolean allowDataFilesWithinTableLocationOnly = + conf.getBoolean(HiveConf.ConfVars.HIVE_ICEBERG_ALLOW_DATAFILES_IN_TABLE_LOCATION_ONLY.varname, + HiveConf.ConfVars.HIVE_ICEBERG_ALLOW_DATAFILES_IN_TABLE_LOCATION_ONLY.defaultBoolVal); + + map.put(HiveConf.ConfVars.HIVE_ICEBERG_ALLOW_DATAFILES_IN_TABLE_LOCATION_ONLY.varname, + String.valueOf(allowDataFilesWithinTableLocationOnly)); } @Override @@ -684,8 +671,8 @@ public DynamicPartitionCtx createDPContext( Table table = IcebergTableUtil.getTable(conf, tableDesc.getProperties()); DynamicPartitionCtx dpCtx = new DynamicPartitionCtx(Maps.newLinkedHashMap(), - hiveConf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), - hiveConf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE)); + hiveConf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME), + hiveConf.getIntVar(HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS_PER_NODE)); List, ExprNodeDesc>> customSortExprs = Lists.newLinkedList(); dpCtx.setCustomSortExpressions(customSortExprs); @@ -715,6 +702,7 @@ public DynamicPartitionCtx createDPContext( addCustomSortExpr(table, hmsTable, writeOperation, customSortExprs, getSortTransformSpec(table)); } + dpCtx.setHasCustomSortExprs(!customSortExprs.isEmpty()); return dpCtx; } @@ -731,14 +719,9 @@ private void addCustomSortExpr(Table table, org.apache.hadoop.hive.ql.metadata. int offset = (shouldOverwrite(hmsTable, writeOperation) ? 
ACID_VIRTUAL_COLS_AS_FIELD_SCHEMA : acidSelectColumns(hmsTable, writeOperation)).size(); - for (TransformSpec spec : transformSpecs) { - int order = fieldOrderMap.get(spec.getColumnName()); - if (TransformSpec.TransformType.BUCKET.equals(spec.getTransformType())) { - customSortExprs.add(BUCKET_SORT_EXPR.apply(order + offset, spec.getTransformParam().get())); - } else { - customSortExprs.add(cols -> cols.get(order + offset).clone()); - } - } + customSortExprs.addAll(transformSpecs.stream().map(spec -> + IcebergTransformSortFunctionUtil.getCustomSortExprs(spec, fieldOrderMap.get(spec.getColumnName()) + offset) + ).collect(Collectors.toList())); } @Override @@ -1085,7 +1068,7 @@ static String encodeString(String rawString) { String getPathForAuth(String locationProperty) { return getPathForAuth(locationProperty, - SessionStateUtil.getProperty(conf, hive_metastoreConstants.DEFAULT_TABLE_LOCATION).orElse(null)); + SessionStateUtil.getProperty(conf, SessionStateUtil.DEFAULT_TABLE_LOCATION).orElse(null)); } String getPathForAuth(String locationProperty, String defaultTableLocation) { @@ -1127,6 +1110,11 @@ public void validateSinkDesc(FileSinkDesc sinkDesc) throws SemanticException { // If the table is empty we don't have any danger that some data can get lost. return; } + if (RewritePolicy.fromString(conf.get(ConfVars.REWRITE_POLICY.varname, RewritePolicy.DEFAULT.name())) == + RewritePolicy.ALL_PARTITIONS) { + // Table rewriting has special logic as part of IOW that handles the case when table had a partition evolution + return; + } if (IcebergTableUtil.isBucketed(table)) { throw new SemanticException("Cannot perform insert overwrite query on bucket partitioned Iceberg table."); } @@ -1661,6 +1649,11 @@ public void setTableParametersForCTLT(org.apache.hadoop.hive.ql.metadata.Table t } } + @Override + public void setTableLocationForCTAS(CreateTableDesc desc, String location) { + desc.setLocation(location); + } + @Override public Map getNativeProperties(org.apache.hadoop.hive.ql.metadata.Table table) { Table origTable = IcebergTableUtil.getTable(conf, table.getTTable()); diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveTableUtil.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveTableUtil.java index a453e5ea723a..510f562922ba 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveTableUtil.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveTableUtil.java @@ -234,7 +234,7 @@ public static RemoteIterator getFilesIterator(Path path, Conf } static String generateTableObjectLocation(String tableLocation, Configuration conf) { - return tableLocation + "/temp/" + conf.get(HiveConf.ConfVars.HIVEQUERYID.varname) + TABLE_EXTENSION; + return tableLocation + "/temp/" + conf.get(HiveConf.ConfVars.HIVE_QUERY_ID.varname) + TABLE_EXTENSION; } static void createFileForTableObject(Table table, Configuration conf) { @@ -305,7 +305,7 @@ static JobConf getPartJobConf(Configuration confs, org.apache.hadoop.hive.ql.met job.set(ColumnProjectionUtils.READ_COLUMN_NAMES_CONF_STR, Constants.ICEBERG_PARTITION_TABLE_SCHEMA); job.set(InputFormatConfig.TABLE_LOCATION, tbl.getPath().toString()); job.set(InputFormatConfig.TABLE_IDENTIFIER, tbl.getFullyQualifiedName() + ".partitions"); - HiveConf.setVar(job, HiveConf.ConfVars.HIVEFETCHOUTPUTSERDE, Constants.DELIMITED_JSON_SERDE); + HiveConf.setVar(job, HiveConf.ConfVars.HIVE_FETCH_OUTPUT_SERDE, Constants.DELIMITED_JSON_SERDE); HiveConf.setBoolVar(job, 
HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false); return job; } diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTransformSortFunctionUtil.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTransformSortFunctionUtil.java new file mode 100644 index 000000000000..5373a4d903da --- /dev/null +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/IcebergTransformSortFunctionUtil.java @@ -0,0 +1,158 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.mr.hive; + +import java.util.List; +import java.util.function.BiFunction; +import java.util.function.Function; +import org.apache.hadoop.hive.ql.exec.UDFArgumentException; +import org.apache.hadoop.hive.ql.parse.TransformSpec; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.iceberg.mr.hive.udf.GenericUDFIcebergBucket; +import org.apache.iceberg.mr.hive.udf.GenericUDFIcebergDay; +import org.apache.iceberg.mr.hive.udf.GenericUDFIcebergHour; +import org.apache.iceberg.mr.hive.udf.GenericUDFIcebergMonth; +import org.apache.iceberg.mr.hive.udf.GenericUDFIcebergTruncate; +import org.apache.iceberg.mr.hive.udf.GenericUDFIcebergYear; +import org.apache.iceberg.relocated.com.google.common.collect.Lists; + +/** + * A utility class which provides Iceberg transform sort functions. + */ +public final class IcebergTransformSortFunctionUtil { + + private IcebergTransformSortFunctionUtil() { + // not called + } + + /** + * Function template for producing a custom sort expression function: + * Takes the source column index and the bucket count to create a function where Iceberg transform UDF is used to + * build the sort expression, e.g. 
iceberg_bucket(_col2, 5) + */ + private static final transient BiFunction, ExprNodeDesc>> + BUCKET_SORT_EXPR = + (idx, bucket) -> cols -> { + try { + ExprNodeDesc icebergBucketSourceCol = cols.get(idx); + return ExprNodeGenericFuncDesc.newInstance(new GenericUDFIcebergBucket(), "iceberg_bucket", + Lists.newArrayList( + icebergBucketSourceCol, + new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, bucket) + )); + } catch (UDFArgumentException e) { + throw new RuntimeException(e); + } + }; + + private static final transient BiFunction, ExprNodeDesc>> + TRUNCATE_SORT_EXPR = + (idx, truncateLength) -> cols -> { + try { + ExprNodeDesc icebergTruncateSourceCol = cols.get(idx); + return ExprNodeGenericFuncDesc.newInstance(new GenericUDFIcebergTruncate(), "iceberg_truncate", + Lists.newArrayList( + icebergTruncateSourceCol, + new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, truncateLength) + )); + } catch (UDFArgumentException e) { + throw new RuntimeException(e); + } + }; + + private static final transient Function, ExprNodeDesc>> + YEAR_SORT_EXPR = + idx -> cols -> { + try { + ExprNodeDesc icebergYearSourceCol = cols.get(idx); + return ExprNodeGenericFuncDesc.newInstance(new GenericUDFIcebergYear(), "iceberg_year", + Lists.newArrayList( + icebergYearSourceCol + )); + } catch (UDFArgumentException e) { + throw new RuntimeException(e); + } + }; + + private static final transient Function, ExprNodeDesc>> + MONTH_SORT_EXPR = + idx -> cols -> { + try { + ExprNodeDesc icebergMonthSourceCol = cols.get(idx); + return ExprNodeGenericFuncDesc.newInstance(new GenericUDFIcebergMonth(), "iceberg_month", + Lists.newArrayList( + icebergMonthSourceCol + )); + } catch (UDFArgumentException e) { + throw new RuntimeException(e); + } + }; + + private static final transient Function, ExprNodeDesc>> + DAY_SORT_EXPR = + idx -> cols -> { + try { + ExprNodeDesc icebergDaySourceCol = cols.get(idx); + return ExprNodeGenericFuncDesc.newInstance(new GenericUDFIcebergDay(), "iceberg_day", + Lists.newArrayList( + icebergDaySourceCol + )); + } catch (UDFArgumentException e) { + throw new RuntimeException(e); + } + }; + + private static final transient Function, ExprNodeDesc>> + HOUR_SORT_EXPR = + idx -> cols -> { + try { + ExprNodeDesc icebergHourSourceCol = cols.get(idx); + return ExprNodeGenericFuncDesc.newInstance(new GenericUDFIcebergHour(), "iceberg_hour", + Lists.newArrayList( + icebergHourSourceCol + )); + } catch (UDFArgumentException e) { + throw new RuntimeException(e); + } + }; + + public static Function, ExprNodeDesc> getCustomSortExprs(TransformSpec spec, int index) { + switch (spec.getTransformType()) { + case BUCKET: + return BUCKET_SORT_EXPR.apply(index, spec.getTransformParam().get()); + case TRUNCATE: + return TRUNCATE_SORT_EXPR.apply(index, spec.getTransformParam().get()); + case YEAR: + return YEAR_SORT_EXPR.apply(index); + case MONTH: + return MONTH_SORT_EXPR.apply(index); + case DAY: + return DAY_SORT_EXPR.apply(index); + case HOUR: + return HOUR_SORT_EXPR.apply(index); + default: + return cols -> cols.get(index).clone(); + } + } + +} diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/compaction/IcebergCompactionService.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/compaction/IcebergCompactionService.java new file mode 100644 index 000000000000..7251f6965bce --- /dev/null +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/compaction/IcebergCompactionService.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.iceberg.mr.hive.compaction; + +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; +import org.apache.hadoop.hive.ql.txn.compactor.CompactorContext; +import org.apache.hadoop.hive.ql.txn.compactor.CompactorPipeline; +import org.apache.hadoop.hive.ql.txn.compactor.CompactorUtil; +import org.apache.hadoop.hive.ql.txn.compactor.service.CompactionService; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class IcebergCompactionService extends CompactionService { + private static final String CLASS_NAME = IcebergCompactionService.class.getName(); + private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME); + + public IcebergCompactionService() { + } + + public Boolean compact(Table table, CompactionInfo ci) throws Exception { + + if (!ci.isMajorCompaction()) { + ci.errorMessage = "Presently Iceberg tables support only Major compaction"; + LOG.error(ci.errorMessage + " Compaction info: {}", ci); + try { + msc.markRefused(CompactionInfo.compactionInfoToStruct(ci)); + } catch (Throwable tr) { + LOG.error("Caught an exception while trying to mark compaction {} as failed: {}", ci, tr); + } + return false; + } + CompactorUtil.checkInterrupt(CLASS_NAME); + + if (ci.runAs == null) { + ci.runAs = TxnUtils.findUserToRunAs(table.getSd().getLocation(), table, conf); + } + + try { + CompactorPipeline compactorPipeline = compactorFactory.getCompactorPipeline(table, conf, ci, msc); + computeStats = collectGenericStats; + + LOG.info("Starting " + ci.type.toString() + " compaction for " + ci.getFullPartitionName() + ", id:" + + ci.id + " with compute stats set to " + computeStats); + + CompactorContext compactorContext = new CompactorContext(conf, table, ci); + compactorPipeline.execute(compactorContext); + + LOG.info("Completed " + ci.type.toString() + " compaction for " + ci.getFullPartitionName() + + ", marking as compacted."); + msc.markCleaned(CompactionInfo.compactionInfoToStruct(ci)); + + } catch (Throwable e) { + computeStats = false; + throw e; + } + + return true; + } + + @Override + public void cleanupResultDirs(CompactionInfo ci) { + + } +} diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/compaction/IcebergMajorQueryCompactor.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/compaction/IcebergMajorQueryCompactor.java new file mode 100644 index 000000000000..e3dba519dc96 --- /dev/null +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/compaction/IcebergMajorQueryCompactor.java @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.mr.hive.compaction; + +import java.io.IOException; +import java.util.Map; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.ql.Context.RewritePolicy; +import org.apache.hadoop.hive.ql.DriverUtils; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.ql.txn.compactor.CompactorContext; +import org.apache.hadoop.hive.ql.txn.compactor.QueryCompactor; +import org.apache.hive.iceberg.org.apache.orc.storage.common.TableName; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class IcebergMajorQueryCompactor extends QueryCompactor { + + private static final Logger LOG = LoggerFactory.getLogger(IcebergMajorQueryCompactor.class.getName()); + + @Override + public boolean run(CompactorContext context) throws IOException, HiveException, InterruptedException { + + String compactTableName = TableName.getDbTable(context.getTable().getDbName(), context.getTable().getTableName()); + Map tblProperties = context.getTable().getParameters(); + LOG.debug("Initiating compaction for the {} table", compactTableName); + + String compactionQuery = String.format("insert overwrite table %s select * from % { + void apply(T argument) throws HiveException; + } + + private transient UDFEvalFunction evaluator; + + @Override + public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { + if (arguments.length != 1) { + throw new UDFArgumentLengthException( + "ICEBERG_DAY requires 1 arguments (value), but got " + arguments.length); + } + + if (arguments[0].getCategory() != ObjectInspector.Category.PRIMITIVE) { + throw new UDFArgumentException( + "ICEBERG_DAY first argument takes primitive types, got " + argumentOI.getTypeName()); + } + argumentOI = (PrimitiveObjectInspector) arguments[0]; + + PrimitiveObjectInspector.PrimitiveCategory inputType = argumentOI.getPrimitiveCategory(); + ObjectInspector outputOI; + switch (inputType) { + case DATE: + converter = new PrimitiveObjectInspectorConverter.DateConverter(argumentOI, + PrimitiveObjectInspectorFactory.writableDateObjectInspector); + Function dateTransform = Transforms.day().bind(Types.DateType.get()); + evaluator = arg -> { + DateWritableV2 val = (DateWritableV2) converter.convert(arg.get()); + result.set(dateTransform.apply(val.getDays())); + }; + break; + + case TIMESTAMP: + converter = new PrimitiveObjectInspectorConverter.TimestampConverter(argumentOI, + PrimitiveObjectInspectorFactory.writableTimestampObjectInspector); + Function timestampTransform = Transforms.day().bind(Types.TimestampType.withoutZone()); + evaluator = arg -> { + TimestampWritableV2 val = (TimestampWritableV2) converter.convert(arg.get()); + 
result.set(timestampTransform.apply(val.getNanos() / 1000L)); + }; + break; + + case TIMESTAMPLOCALTZ: + converter = new PrimitiveObjectInspectorConverter.TimestampLocalTZConverter(argumentOI, + PrimitiveObjectInspectorFactory.writableTimestampTZObjectInspector); + Function timestampLocalTzTransform = Transforms.day().bind(Types.TimestampType.withZone()); + evaluator = arg -> { + TimestampLocalTZWritable val = (TimestampLocalTZWritable) converter.convert(arg.get()); + result.set(timestampLocalTzTransform.apply(val.getNanos() / 1000L)); + }; + break; + + default: + throw new UDFArgumentException( + " ICEBERG_DAY() only takes DATE/TIMESTAMP/TIMESTAMPLOCALTZ" + + " types as first argument, got " + inputType); + } + outputOI = PrimitiveObjectInspectorFactory.writableIntObjectInspector; + return outputOI; + } + + @Override + public Object evaluate(DeferredObject[] arguments) throws HiveException { + DeferredObject argument = arguments[0]; + if (argument == null || argument.get() == null) { + return null; + } else { + evaluator.apply(argument); + } + return result; + } + + @Override + public String getDisplayString(String[] children) { + return getStandardDisplayString("iceberg_day", children); + } + +} diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/udf/GenericUDFIcebergHour.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/udf/GenericUDFIcebergHour.java new file mode 100644 index 000000000000..9457d56e22cf --- /dev/null +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/udf/GenericUDFIcebergHour.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.iceberg.mr.hive.udf; + +import java.util.function.Function; +import org.apache.hadoop.hive.ql.exec.Description; +import org.apache.hadoop.hive.ql.exec.UDFArgumentException; +import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; +import org.apache.hadoop.hive.serde2.io.TimestampLocalTZWritable; +import org.apache.hadoop.hive.serde2.io.TimestampWritableV2; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorConverter; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.io.IntWritable; +import org.apache.iceberg.transforms.Transforms; +import org.apache.iceberg.types.Types; + +/** + * GenericUDFIcebergHour - UDF that wraps around Iceberg's hour transform function + */ +@Description(name = "iceberg_hour", + value = "_FUNC_(value) - " + + "Returns the bucket value calculated by Iceberg hour transform function ", + extended = "Example:\n > SELECT _FUNC_('2023-01-01 11:00:57');\n 11") +public class GenericUDFIcebergHour extends GenericUDF { + private final IntWritable result = new IntWritable(); + private transient PrimitiveObjectInspector argumentOI; + private transient ObjectInspectorConverters.Converter converter; + + @FunctionalInterface + private interface UDFEvalFunction { + void apply(T argument) throws HiveException; + } + + private transient UDFEvalFunction evaluator; + + @Override + public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { + if (arguments.length != 1) { + throw new UDFArgumentLengthException( + "ICEBERG_YEAR requires 1 arguments (value), but got " + arguments.length); + } + + if (arguments[0].getCategory() != ObjectInspector.Category.PRIMITIVE) { + throw new UDFArgumentException( + "ICEBERG_YEAR first argument takes primitive types, got " + argumentOI.getTypeName()); + } + argumentOI = (PrimitiveObjectInspector) arguments[0]; + + PrimitiveObjectInspector.PrimitiveCategory inputType = argumentOI.getPrimitiveCategory(); + ObjectInspector outputOI; + switch (inputType) { + case TIMESTAMP: + converter = new PrimitiveObjectInspectorConverter.TimestampConverter(argumentOI, + PrimitiveObjectInspectorFactory.writableTimestampObjectInspector); + Function timestampTransform = Transforms.hour().bind(Types.TimestampType.withoutZone()); + evaluator = arg -> { + TimestampWritableV2 val = (TimestampWritableV2) converter.convert(arg.get()); + result.set(timestampTransform.apply(val.getNanos() / 1000L)); + }; + break; + + case TIMESTAMPLOCALTZ: + converter = new PrimitiveObjectInspectorConverter.TimestampLocalTZConverter(argumentOI, + PrimitiveObjectInspectorFactory.writableTimestampTZObjectInspector); + Function timestampLocalTzTransform = Transforms.hour().bind(Types.TimestampType.withZone()); + evaluator = arg -> { + TimestampLocalTZWritable val = (TimestampLocalTZWritable) converter.convert(arg.get()); + result.set(timestampLocalTzTransform.apply(val.getNanos() / 1000L)); + }; + break; + + default: + throw new UDFArgumentException( + " ICEBERG_HOUR() only takes TIMESTAMP/TIMESTAMPLOCALTZ" + + " types as first argument, got " + inputType); + } + outputOI = 
PrimitiveObjectInspectorFactory.writableIntObjectInspector; + return outputOI; + } + + @Override + public Object evaluate(DeferredObject[] arguments) throws HiveException { + DeferredObject argument = arguments[0]; + if (argument == null || argument.get() == null) { + return null; + } else { + evaluator.apply(argument); + } + return result; + } + + @Override + public String getDisplayString(String[] children) { + return getStandardDisplayString("iceberg_hour", children); + } + +} diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/udf/GenericUDFIcebergMonth.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/udf/GenericUDFIcebergMonth.java new file mode 100644 index 000000000000..196f5dc6d55d --- /dev/null +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/udf/GenericUDFIcebergMonth.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.iceberg.mr.hive.udf; + +import java.util.function.Function; +import org.apache.hadoop.hive.ql.exec.Description; +import org.apache.hadoop.hive.ql.exec.UDFArgumentException; +import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; +import org.apache.hadoop.hive.serde2.io.DateWritableV2; +import org.apache.hadoop.hive.serde2.io.TimestampLocalTZWritable; +import org.apache.hadoop.hive.serde2.io.TimestampWritableV2; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorConverter; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.io.IntWritable; +import org.apache.iceberg.transforms.Transforms; +import org.apache.iceberg.types.Types; + +/** + * GenericUDFIcebergMonth - UDF that wraps around Iceberg's month transform function + */ +@Description(name = "iceberg_month", + value = "_FUNC_(value) - " + + "Returns the bucket value calculated by Iceberg month transform function ", + extended = "Example:\n > SELECT _FUNC_('2023-01-01');\n 1") +public class GenericUDFIcebergMonth extends GenericUDF { + private final IntWritable result = new IntWritable(); + private transient PrimitiveObjectInspector argumentOI; + private transient ObjectInspectorConverters.Converter converter; + + @FunctionalInterface + private interface UDFEvalFunction { + void apply(T argument) throws HiveException; + } + + private transient UDFEvalFunction evaluator; + + @Override + public 
ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { + if (arguments.length != 1) { + throw new UDFArgumentLengthException( + "ICEBERG_MONTH requires 1 arguments (value), but got " + arguments.length); + } + + if (arguments[0].getCategory() != ObjectInspector.Category.PRIMITIVE) { + throw new UDFArgumentException( + "ICEBERG_MONTH first argument takes primitive types, got " + argumentOI.getTypeName()); + } + argumentOI = (PrimitiveObjectInspector) arguments[0]; + + PrimitiveObjectInspector.PrimitiveCategory inputType = argumentOI.getPrimitiveCategory(); + ObjectInspector outputOI; + switch (inputType) { + case DATE: + converter = new PrimitiveObjectInspectorConverter.DateConverter(argumentOI, + PrimitiveObjectInspectorFactory.writableDateObjectInspector); + Function dateTransform = Transforms.month().bind(Types.DateType.get()); + evaluator = arg -> { + DateWritableV2 val = (DateWritableV2) converter.convert(arg.get()); + result.set(dateTransform.apply(val.getDays())); + }; + break; + + case TIMESTAMP: + converter = new PrimitiveObjectInspectorConverter.TimestampConverter(argumentOI, + PrimitiveObjectInspectorFactory.writableTimestampObjectInspector); + Function timestampTransform = Transforms.month().bind(Types.TimestampType.withoutZone()); + evaluator = arg -> { + TimestampWritableV2 val = (TimestampWritableV2) converter.convert(arg.get()); + result.set(timestampTransform.apply(val.getNanos() / 1000L)); + }; + break; + + case TIMESTAMPLOCALTZ: + converter = new PrimitiveObjectInspectorConverter.TimestampLocalTZConverter(argumentOI, + PrimitiveObjectInspectorFactory.writableTimestampTZObjectInspector); + Function timestampLocalTzTransform = Transforms.month().bind(Types.TimestampType.withZone()); + evaluator = arg -> { + TimestampLocalTZWritable val = (TimestampLocalTZWritable) converter.convert(arg.get()); + result.set(timestampLocalTzTransform.apply(val.getNanos() / 1000L)); + }; + break; + + default: + throw new UDFArgumentException( + " ICEBERG_MONTH() only takes DATE/TIMESTAMP/TIMESTAMPLOCALTZ" + + " types as first argument, got " + inputType); + } + outputOI = PrimitiveObjectInspectorFactory.writableIntObjectInspector; + return outputOI; + } + + @Override + public Object evaluate(DeferredObject[] arguments) throws HiveException { + DeferredObject argument = arguments[0]; + if (argument == null || argument.get() == null) { + return null; + } else { + evaluator.apply(argument); + } + return result; + } + + @Override + public String getDisplayString(String[] children) { + return getStandardDisplayString("iceberg_month", children); + } + +} diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/udf/GenericUDFIcebergTruncate.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/udf/GenericUDFIcebergTruncate.java new file mode 100644 index 000000000000..bc0b63fefe8d --- /dev/null +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/udf/GenericUDFIcebergTruncate.java @@ -0,0 +1,186 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.iceberg.mr.hive.udf; + +import java.math.BigDecimal; +import java.util.function.Function; +import org.apache.hadoop.hive.ql.exec.Description; +import org.apache.hadoop.hive.ql.exec.UDFArgumentException; +import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorConverter; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantIntObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; +import org.apache.hadoop.io.DoubleWritable; +import org.apache.hadoop.io.FloatWritable; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Text; +import org.apache.iceberg.transforms.Transforms; +import org.apache.iceberg.types.Type; +import org.apache.iceberg.types.Types; + +/** + * GenericUDFIcebergTruncate - UDF that wraps around Iceberg's truncate transform function + */ +@Description(name = "iceberg_truncate", + value = "_FUNC_(value, truncateLength) - " + + "Returns the bucket value calculated by Iceberg bucket transform function ", + extended = "Example:\n > SELECT _FUNC_('abcdefgh', 5);\n abcde") +public class GenericUDFIcebergTruncate extends GenericUDF { + private final Text result = new Text(); + private int truncateLength = 0; + private transient PrimitiveObjectInspector argumentOI; + private transient ObjectInspectorConverters.Converter converter; + + @FunctionalInterface + private interface UDFEvalFunction { + void apply(T argument) throws HiveException; + } + + private transient UDFEvalFunction evaluator; + + @Override + public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { + if (arguments.length != 2) { + throw new UDFArgumentLengthException( + "ICEBERG_BUCKET requires 2 arguments (value, bucketCount), but got " + arguments.length); + } + + truncateLength = getTruncateLength(arguments[1]); + + if (arguments[0].getCategory() != ObjectInspector.Category.PRIMITIVE) { + throw new UDFArgumentException( + "ICEBERG_BUCKET first argument takes primitive types, got " + argumentOI.getTypeName()); + } + argumentOI = (PrimitiveObjectInspector) arguments[0]; + + PrimitiveObjectInspector.PrimitiveCategory inputType = argumentOI.getPrimitiveCategory(); + ObjectInspector outputOI; + switch (inputType) { + case CHAR: + case VARCHAR: + case STRING: + converter = new PrimitiveObjectInspectorConverter.StringConverter(argumentOI); + Function stringTransform = 
Transforms.truncate(truncateLength).bind(Types.StringType.get()); + evaluator = arg -> { + String val = (String) converter.convert(arg.get()); + result.set(String.valueOf(stringTransform.apply(val))); + }; + break; + + case INT: + converter = new PrimitiveObjectInspectorConverter.IntConverter(argumentOI, + PrimitiveObjectInspectorFactory.writableIntObjectInspector); + Function intTransform = Transforms.truncate(truncateLength).bind(Types.IntegerType.get()); + evaluator = arg -> { + IntWritable val = (IntWritable) converter.convert(arg.get()); + result.set(String.valueOf(intTransform.apply(val.get()))); + }; + break; + + case LONG: + converter = new PrimitiveObjectInspectorConverter.LongConverter(argumentOI, + PrimitiveObjectInspectorFactory.writableLongObjectInspector); + Function longTransform = Transforms.truncate(truncateLength).bind(Types.LongType.get()); + evaluator = arg -> { + LongWritable val = (LongWritable) converter.convert(arg.get()); + result.set(String.valueOf(longTransform.apply(val.get()))); + }; + break; + + case DECIMAL: + DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) TypeInfoUtils.getTypeInfoFromObjectInspector(argumentOI); + Type.PrimitiveType decimalIcebergType = Types.DecimalType.of(decimalTypeInfo.getPrecision(), + decimalTypeInfo.getScale()); + + converter = new PrimitiveObjectInspectorConverter.HiveDecimalConverter(argumentOI, + PrimitiveObjectInspectorFactory.writableHiveDecimalObjectInspector); + Function bigDecimalTransform = Transforms.truncate(truncateLength).bind(decimalIcebergType); + evaluator = arg -> { + HiveDecimalWritable val = (HiveDecimalWritable) converter.convert(arg.get()); + result.set(((BigDecimal) bigDecimalTransform.apply(val.getHiveDecimal().bigDecimalValue())).toPlainString()); + }; + break; + + case FLOAT: + converter = new PrimitiveObjectInspectorConverter.FloatConverter(argumentOI, + PrimitiveObjectInspectorFactory.writableFloatObjectInspector); + Function floatTransform = Transforms.truncate(truncateLength).bind(Types.FloatType.get()); + evaluator = arg -> { + FloatWritable val = (FloatWritable) converter.convert(arg.get()); + result.set(String.valueOf(floatTransform.apply(val.get()))); + }; + break; + + case DOUBLE: + converter = new PrimitiveObjectInspectorConverter.DoubleConverter(argumentOI, + PrimitiveObjectInspectorFactory.writableDoubleObjectInspector); + Function doubleTransform = Transforms.truncate(truncateLength).bind(Types.DoubleType.get()); + evaluator = arg -> { + DoubleWritable val = (DoubleWritable) converter.convert(arg.get()); + result.set(String.valueOf(doubleTransform.apply(val.get()))); + }; + break; + + default: + throw new UDFArgumentException( + " ICEBERG_TRUNCATE() only takes STRING/CHAR/VARCHAR/INT/LONG/DECIMAL/FLOAT/DOUBLE" + + " types as first argument, got " + inputType); + } + outputOI = PrimitiveObjectInspectorFactory.writableStringObjectInspector; + return outputOI; + } + + private static int getTruncateLength(ObjectInspector arg) throws UDFArgumentException { + UDFArgumentException udfArgumentException = new UDFArgumentException("ICEBERG_TRUNCATE() second argument can " + + " only take an int type, but got " + arg.getTypeName()); + if (arg.getCategory() != ObjectInspector.Category.PRIMITIVE) { + throw udfArgumentException; + } + PrimitiveObjectInspector.PrimitiveCategory inputType = ((PrimitiveObjectInspector) arg).getPrimitiveCategory(); + if (inputType != PrimitiveObjectInspector.PrimitiveCategory.INT) { + throw udfArgumentException; + } + return ((WritableConstantIntObjectInspector) 
arg).getWritableConstantValue().get(); + } + + @Override + public Object evaluate(DeferredObject[] arguments) throws HiveException { + DeferredObject argument = arguments[0]; + if (argument == null || argument.get() == null) { + return null; + } else { + evaluator.apply(argument); + } + return result; + } + + @Override + public String getDisplayString(String[] children) { + return getStandardDisplayString("iceberg_truncate", children); + } +} diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/udf/GenericUDFIcebergYear.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/udf/GenericUDFIcebergYear.java new file mode 100644 index 000000000000..19754bfc01d6 --- /dev/null +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/udf/GenericUDFIcebergYear.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.iceberg.mr.hive.udf; + +import java.util.function.Function; +import org.apache.hadoop.hive.ql.exec.Description; +import org.apache.hadoop.hive.ql.exec.UDFArgumentException; +import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; +import org.apache.hadoop.hive.serde2.io.DateWritableV2; +import org.apache.hadoop.hive.serde2.io.TimestampLocalTZWritable; +import org.apache.hadoop.hive.serde2.io.TimestampWritableV2; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorConverter; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.io.IntWritable; +import org.apache.iceberg.transforms.Transforms; +import org.apache.iceberg.types.Types; + +/** + * GenericUDFIcebergYear - UDF that wraps around Iceberg's year transform function + */ +@Description(name = "iceberg_year", + value = "_FUNC_(value) - " + + "Returns the bucket value calculated by Iceberg year transform function ", + extended = "Example:\n > SELECT _FUNC_('2023-01-01');\n 2023") +public class GenericUDFIcebergYear extends GenericUDF { + private final IntWritable result = new IntWritable(); + private transient PrimitiveObjectInspector argumentOI; + private transient ObjectInspectorConverters.Converter converter; + + @FunctionalInterface + private interface UDFEvalFunction { + void apply(T argument) throws HiveException; + } + + private transient UDFEvalFunction evaluator; + + @Override + public ObjectInspector 
initialize(ObjectInspector[] arguments) throws UDFArgumentException { + if (arguments.length != 1) { + throw new UDFArgumentLengthException( + "ICEBERG_YEAR requires 1 argument (value), but got " + arguments.length); + } + + if (arguments[0].getCategory() != ObjectInspector.Category.PRIMITIVE) { + throw new UDFArgumentException( + "ICEBERG_YEAR first argument takes primitive types, got " + arguments[0].getTypeName()); + } + argumentOI = (PrimitiveObjectInspector) arguments[0]; + + PrimitiveObjectInspector.PrimitiveCategory inputType = argumentOI.getPrimitiveCategory(); + ObjectInspector outputOI; + switch (inputType) { + case DATE: + converter = new PrimitiveObjectInspectorConverter.DateConverter(argumentOI, + PrimitiveObjectInspectorFactory.writableDateObjectInspector); + Function dateTransform = Transforms.year().bind(Types.DateType.get()); + evaluator = arg -> { + DateWritableV2 val = (DateWritableV2) converter.convert(arg.get()); + result.set(dateTransform.apply(val.getDays())); + }; + break; + + case TIMESTAMP: + converter = new PrimitiveObjectInspectorConverter.TimestampConverter(argumentOI, + PrimitiveObjectInspectorFactory.writableTimestampObjectInspector); + Function timestampTransform = Transforms.year().bind(Types.TimestampType.withoutZone()); + evaluator = arg -> { + TimestampWritableV2 val = (TimestampWritableV2) converter.convert(arg.get()); + result.set(timestampTransform.apply(val.getNanos() / 1000L)); + }; + break; + + case TIMESTAMPLOCALTZ: + converter = new PrimitiveObjectInspectorConverter.TimestampLocalTZConverter(argumentOI, + PrimitiveObjectInspectorFactory.writableTimestampTZObjectInspector); + Function timestampLocalTzTransform = Transforms.year().bind(Types.TimestampType.withZone()); + evaluator = arg -> { + TimestampLocalTZWritable val = (TimestampLocalTZWritable) converter.convert(arg.get()); + result.set(timestampLocalTzTransform.apply(val.getNanos() / 1000L)); + }; + break; + + default: + throw new UDFArgumentException( + " ICEBERG_YEAR() only takes DATE/TIMESTAMP/TIMESTAMPLOCALTZ" + + " types as first argument, got " + inputType); + } + outputOI = PrimitiveObjectInspectorFactory.writableIntObjectInspector; + return outputOI; + } + + @Override + public Object evaluate(DeferredObject[] arguments) throws HiveException { + DeferredObject argument = arguments[0]; + if (argument == null || argument.get() == null) { + return null; + } else { + evaluator.apply(argument); + } + return result; + } + + @Override + public String getDisplayString(String[] children) { + return getStandardDisplayString("iceberg_year", children); + } + +} diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/writer/HiveIcebergCopyOnWriteRecordWriter.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/writer/HiveIcebergCopyOnWriteRecordWriter.java index f13f13ec0465..142f73c85491 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/writer/HiveIcebergCopyOnWriteRecordWriter.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/writer/HiveIcebergCopyOnWriteRecordWriter.java @@ -45,7 +45,7 @@ class HiveIcebergCopyOnWriteRecordWriter extends HiveIcebergWriterBase { private final int currentSpecId; private final GenericRecord rowDataTemplate; - private final List referencedDataFiles; + private final List replacedDataFiles; HiveIcebergCopyOnWriteRecordWriter(Schema schema, Map specs, int currentSpecId, FileWriterFactory fileWriterFactory, OutputFileFactory fileFactory, FileIO io, @@ -54,7 +54,7 @@ class 
HiveIcebergCopyOnWriteRecordWriter extends HiveIcebergWriterBase { new ClusteredDataWriter<>(fileWriterFactory, fileFactory, io, targetFileSize)); this.currentSpecId = currentSpecId; this.rowDataTemplate = GenericRecord.create(schema); - this.referencedDataFiles = Lists.newArrayList(); + this.replacedDataFiles = Lists.newArrayList(); } @Override @@ -72,7 +72,7 @@ public void write(Writable row) throws IOException { .withFileSizeInBytes(0) .withRecordCount(0) .build(); - referencedDataFiles.add(dataFile); + replacedDataFiles.add(dataFile); } else { writer.write(rowData, specs.get(currentSpecId), partition(rowData, currentSpecId)); } @@ -81,6 +81,6 @@ public void write(Writable row) throws IOException { @Override public FilesForCommit files() { List dataFiles = ((DataWriteResult) writer.result()).dataFiles(); - return FilesForCommit.onlyData(dataFiles, referencedDataFiles); + return FilesForCommit.onlyData(dataFiles, replacedDataFiles); } } diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/writer/HiveIcebergDeleteWriter.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/writer/HiveIcebergDeleteWriter.java index bd61f101cd95..6753ffa46c25 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/writer/HiveIcebergDeleteWriter.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/writer/HiveIcebergDeleteWriter.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.Set; import org.apache.hadoop.io.Writable; import org.apache.iceberg.DeleteFile; import org.apache.iceberg.PartitionSpec; @@ -68,6 +69,7 @@ public void write(Writable row) throws IOException { @Override public FilesForCommit files() { List deleteFiles = ((DeleteWriteResult) writer.result()).deleteFiles(); - return FilesForCommit.onlyDelete(deleteFiles); + Set referencedDataFiles = ((DeleteWriteResult) writer.result()).referencedDataFiles(); + return FilesForCommit.onlyDelete(deleteFiles, referencedDataFiles); } } diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/writer/HiveIcebergRecordWriter.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/writer/HiveIcebergRecordWriter.java index 26c4e0947d01..b43376ec7f99 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/writer/HiveIcebergRecordWriter.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/writer/HiveIcebergRecordWriter.java @@ -29,9 +29,11 @@ import org.apache.iceberg.data.Record; import org.apache.iceberg.io.ClusteredDataWriter; import org.apache.iceberg.io.DataWriteResult; +import org.apache.iceberg.io.FanoutDataWriter; import org.apache.iceberg.io.FileIO; import org.apache.iceberg.io.FileWriterFactory; import org.apache.iceberg.io.OutputFileFactory; +import org.apache.iceberg.io.PartitioningWriter; import org.apache.iceberg.mr.hive.FilesForCommit; import org.apache.iceberg.mr.mapred.Container; @@ -41,9 +43,8 @@ class HiveIcebergRecordWriter extends HiveIcebergWriterBase { HiveIcebergRecordWriter(Schema schema, Map specs, int currentSpecId, FileWriterFactory fileWriterFactory, OutputFileFactory fileFactory, FileIO io, - long targetFileSize) { - super(schema, specs, io, - new ClusteredDataWriter<>(fileWriterFactory, fileFactory, io, targetFileSize)); + long targetFileSize, boolean fanoutEnabled) { + super(schema, specs, io, getIcebergDataWriter(fileWriterFactory, fileFactory, io, targetFileSize, fanoutEnabled)); this.currentSpecId = 
currentSpecId; } @@ -58,4 +59,11 @@ public FilesForCommit files() { List dataFiles = ((DataWriteResult) writer.result()).dataFiles(); return FilesForCommit.onlyData(dataFiles); } + + private static PartitioningWriter getIcebergDataWriter(FileWriterFactory fileWriterFactory, + OutputFileFactory fileFactory, FileIO io, + long targetFileSize, boolean fanoutEnabled) { + return fanoutEnabled ? new FanoutDataWriter<>(fileWriterFactory, fileFactory, io, targetFileSize) + : new ClusteredDataWriter<>(fileWriterFactory, fileFactory, io, targetFileSize); + } } diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/writer/WriterBuilder.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/writer/WriterBuilder.java index 2f7177b63810..c68bf8fb1d6a 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/writer/WriterBuilder.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/writer/WriterBuilder.java @@ -45,6 +45,8 @@ public class WriterBuilder { private String queryId; private int poolSize; private Operation operation; + private boolean fanoutEnabled; + // A task may write multiple output files using multiple writers. Each of them must have a unique operationId. private static AtomicInteger operationNum = new AtomicInteger(0); @@ -85,6 +87,11 @@ public WriterBuilder operation(Operation newOperation) { return this; } + public WriterBuilder isFanoutEnabled(boolean isFanoutEnabled) { + this.fanoutEnabled = isFanoutEnabled; + return this; + } + public HiveIcebergWriter build() { Map properties = table.properties(); @@ -133,7 +140,7 @@ public HiveIcebergWriter build() { break; case OTHER: writer = new HiveIcebergRecordWriter(dataSchema, specs, currentSpecId, writerFactory, outputFileFactory, - io, targetFileSize); + io, targetFileSize, fanoutEnabled); break; default: // Update and Merge should be splitted to inserts and deletes diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/mapreduce/HiveIdentityPartitionConverters.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/mapreduce/HiveIdentityPartitionConverters.java new file mode 100644 index 000000000000..6c51de9dabbe --- /dev/null +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/mapreduce/HiveIdentityPartitionConverters.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iceberg.mr.mapreduce; + +import java.math.BigDecimal; +import org.apache.avro.generic.GenericData; +import org.apache.hadoop.hive.common.type.Date; +import org.apache.hadoop.hive.common.type.HiveDecimal; +import org.apache.hadoop.hive.common.type.Timestamp; +import org.apache.iceberg.types.Type; +import org.apache.iceberg.types.Types; +import org.apache.iceberg.util.DateTimeUtil; + +public class HiveIdentityPartitionConverters { + + private HiveIdentityPartitionConverters() { + } + + public static Object convertConstant(Type type, Object value) { + if (value == null) { + return null; + } + + switch (type.typeId()) { + case STRING: + return value.toString(); + case TIME: + return DateTimeUtil.timeFromMicros((Long) value); + case DATE: + return Date.ofEpochDay((Integer) value); + case TIMESTAMP: + if (((Types.TimestampType) type).shouldAdjustToUTC()) { + return DateTimeUtil.timestamptzFromMicros((Long) value).toOffsetTime(); + } else { + return new Timestamp(DateTimeUtil.timestampFromMicros((Long) value)); + } + case DECIMAL: + if (value.getClass().isAssignableFrom(BigDecimal.class)) { + return HiveDecimal.create((BigDecimal) value); + } + return value; + case FIXED: + if (value instanceof GenericData.Fixed) { + return ((GenericData.Fixed) value).bytes(); + } + return value; + default: + } + return value; + } + +} diff --git a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/mapreduce/IcebergInputFormat.java b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/mapreduce/IcebergInputFormat.java index dc50a1e34010..754d78e4d933 100644 --- a/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/mapreduce/IcebergInputFormat.java +++ b/iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/mapreduce/IcebergInputFormat.java @@ -33,8 +33,11 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.llap.LlapHiveUtils; import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.metadata.AuthorizationException; import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.hive.ql.plan.MapWork; import org.apache.hadoop.mapred.JobConf; @@ -218,6 +221,12 @@ public List getSplits(JobContext context) { scan = applyConfig(conf, createTableScan(table, conf)); } + boolean allowDataFilesWithinTableLocationOnly = + conf.getBoolean(HiveConf.ConfVars.HIVE_ICEBERG_ALLOW_DATAFILES_IN_TABLE_LOCATION_ONLY.varname, + HiveConf.ConfVars.HIVE_ICEBERG_ALLOW_DATAFILES_IN_TABLE_LOCATION_ONLY.defaultBoolVal); + Path tableLocation = new Path(conf.get(InputFormatConfig.TABLE_LOCATION)); + + try (CloseableIterable tasksIterable = scan.planTasks()) { tasksIterable.forEach(task -> { if (applyResidual && (model == InputFormatConfig.InMemoryDataModel.HIVE || @@ -225,6 +234,9 @@ public List getSplits(JobContext context) { // TODO: We do not support residual evaluation for HIVE and PIG in memory data model yet checkResiduals(task); } + if (allowDataFilesWithinTableLocationOnly) { + validateFileLocations(task, tableLocation); + } splits.add(new IcebergSplit(conf, task)); }); } catch (IOException e) { @@ -241,6 +253,14 @@ public List getSplits(JobContext context) { return splits; } + private static void validateFileLocations(CombinedScanTask split, Path tableLocation) { + for (FileScanTask fileScanTask : split.files()) { + if 
(!FileUtils.isPathWithinSubtree(new Path(fileScanTask.file().path().toString()), tableLocation)) { + throw new AuthorizationException("The table contains paths which are outside the table location"); + } + } + } + private static void checkResiduals(CombinedScanTask task) { task.files().forEach(fileScanTask -> { Expression residual = fileScanTask.residual(); @@ -371,7 +391,7 @@ private CloseableIterable openVectorized(FileScanTask task, Schema readSchema "Vectorized read is unsupported for Hive 2 integration."); Path path = new Path(task.file().path().toString()); - Map idToConstant = constantsMap(task, IdentityPartitionConverters::convertConstant); + Map idToConstant = constantsMap(task, HiveIdentityPartitionConverters::convertConstant); Expression residual = HiveIcebergInputFormat.residualForTask(task, context.getConfiguration()); // TODO: We have to take care of the EncryptionManager when LLAP and vectorization is used @@ -524,7 +544,8 @@ private CloseableIterable newOrcIterable(InputFile inputFile, FileScanTask ta Types.StructType partitionType = Partitioning.partitionType(table); return PartitionUtil.constantsMap(task, partitionType, converter); } else if (projectsIdentityPartitionColumns) { - return PartitionUtil.constantsMap(task, converter); + Types.StructType partitionType = Partitioning.partitionType(table); + return PartitionUtil.constantsMap(task, partitionType, converter); } else { return Collections.emptyMap(); } diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandlerWithEngineBase.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandlerWithEngineBase.java index 0853124b6025..b5c9e2942b45 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandlerWithEngineBase.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandlerWithEngineBase.java @@ -186,9 +186,9 @@ public void before() throws IOException { // Fetch task conversion might kick in for certain queries preventing vectorization code path to be used, so // we turn it off explicitly to achieve better coverage. 
if (isVectorized) { - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); } else { - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "more"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "more"); } } diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergBranchOperation.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergBranchOperation.java index d892a72b8221..9f88bae3496b 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergBranchOperation.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergBranchOperation.java @@ -238,4 +238,18 @@ public void testCreateBranchFromTag() throws IOException, InterruptedException { "ALTER TABLE customers CREATE BRANCH %s FOR TAG AS OF %s", branchName2, branchName1))) .isInstanceOf(IllegalArgumentException.class).hasMessageEndingWith("does not exist"); } + + @Test + public void testCreateBranchWithNonLowerCase() throws InterruptedException, IOException { + Table table = + testTables.createTableWithVersions(shell, "customers", HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, + fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 2); + + String branchName = "test_Branch_1"; + Long snapshotId = table.history().get(0).snapshotId(); + shell.executeStatement( + String.format("ALTER TABLE customers CREATE BRANCH %s FOR SYSTEM_VERSION AS OF %d", branchName, snapshotId)); + // Select with non-lower case branch name shouldn't throw exception. + shell.executeStatement(String.format("SELECT * FROM default.customers.branch_%s", branchName)); + } } diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergCRUD.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergCRUD.java index acfe94126dc6..bc3c948c4ad4 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergCRUD.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergCRUD.java @@ -618,7 +618,7 @@ public void testConcurrent2Deletes() { .run(i -> { init(shell, testTables, temp, executionEngine); HiveConf.setBoolVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, isVectorized); - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES, RETRY_STRATEGIES_WITHOUT_WRITE_CONFLICT); shell.executeStatement(sql); @@ -649,7 +649,7 @@ public void testConcurrent2Updates() { .run(i -> { init(shell, testTables, temp, executionEngine); HiveConf.setBoolVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, isVectorized); - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES, RETRY_STRATEGIES_WITHOUT_WRITE_CONFLICT); shell.executeStatement(sql); @@ -684,7 +684,7 @@ public void testConcurrentUpdateAndDelete() { .run(i -> { init(shell, testTables, temp, executionEngine); 
HiveConf.setBoolVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, isVectorized); - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES, RETRY_STRATEGIES_WITHOUT_WRITE_CONFLICT); shell.executeStatement(sql[i]); @@ -719,7 +719,7 @@ public void testConcurrent2MergeInserts() { .run(i -> { init(shell, testTables, temp, executionEngine); HiveConf.setBoolVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, isVectorized); - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES, RETRY_STRATEGIES_WITHOUT_WRITE_CONFLICT); shell.executeStatement(sql); diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergFilterFactory.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergFilterFactory.java index 1614d937c37f..3044f0467af3 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergFilterFactory.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergFilterFactory.java @@ -24,9 +24,6 @@ import java.sql.Timestamp; import java.time.LocalDate; import java.time.ZoneOffset; -import java.util.Collections; -import java.util.List; -import org.apache.hadoop.hive.ql.io.sarg.ExpressionTree; import org.apache.hadoop.hive.ql.io.sarg.PredicateLeaf; import org.apache.hadoop.hive.ql.io.sarg.SearchArgument; import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory; @@ -39,7 +36,6 @@ import org.apache.iceberg.expressions.UnboundPredicate; import org.apache.iceberg.types.Types; import org.apache.iceberg.util.DateTimeUtil; -import org.assertj.core.api.Assertions; import org.junit.Test; import static org.junit.Assert.assertEquals; @@ -138,21 +134,6 @@ public void testBetweenOperand() { assertEquals(actual.right().op(), expected.right().op()); } - @Test - public void testUnsupportedBetweenOperandEmptyLeaves() { - SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); - final SearchArgument arg = - new MockSearchArgument( - builder - .startAnd() - .between("salary", PredicateLeaf.Type.LONG, 9000L, 15000L) - .end() - .build()); - Assertions.assertThatThrownBy(() -> HiveIcebergFilterFactory.generateFilterExpression(arg)) - .isInstanceOf(UnsupportedOperationException.class) - .hasMessage("Missing leaf literals: Leaf[empty]"); - } - @Test public void testIsNullOperand() { SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); @@ -278,69 +259,4 @@ private void assertPredicatesMatch(UnboundPredicate expected, UnboundPredicate a assertEquals(expected.literal(), actual.literal()); assertEquals(expected.ref().name(), actual.ref().name()); } - - private static class MockSearchArgument implements SearchArgument { - - private final SearchArgument delegate; - - MockSearchArgument(SearchArgument original) { - delegate = original; - } - - @Override - public ExpressionTree getExpression() { - return delegate.getExpression(); - } - - @Override - public ExpressionTree getCompactExpression() { - return null; - } - - @Override - public TruthValue evaluate(TruthValue[] leaves) { - return 
delegate.evaluate(leaves); - } - - @Override - public List getLeaves() { - return Collections.singletonList( - new PredicateLeaf() { - @Override - public Operator getOperator() { - return Operator.BETWEEN; - } - - @Override - public Type getType() { - return Type.LONG; - } - - @Override - public String getColumnName() { - return "salary"; - } - - @Override - public Object getLiteral() { - return null; - } - - @Override - public List getLiteralList() { - return Collections.emptyList(); - } - - @Override - public int getId() { - return 0; - } - - @Override - public String toString() { - return "Leaf[empty]"; - } - }); - } - } } diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergOutputCommitter.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergOutputCommitter.java index d0eb3ebc8f09..45f82b5a28aa 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergOutputCommitter.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergOutputCommitter.java @@ -224,7 +224,7 @@ private JobConf jobConf(Table table, int taskNum) { JobConf conf = new JobConf(); conf.setNumMapTasks(taskNum); conf.setNumReduceTasks(0); - conf.set(HiveConf.ConfVars.HIVEQUERYID.varname, QUERY_ID); + conf.set(HiveConf.ConfVars.HIVE_QUERY_ID.varname, QUERY_ID); conf.set(InputFormatConfig.OUTPUT_TABLES, table.name()); conf.set(InputFormatConfig.OPERATION_TYPE_PREFIX + table.name(), Context.Operation.OTHER.name()); conf.set(InputFormatConfig.TABLE_CATALOG_PREFIX + table.name(), diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergRestrictDataFiles.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergRestrictDataFiles.java new file mode 100644 index 000000000000..e9d6950ef460 --- /dev/null +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergRestrictDataFiles.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iceberg.mr.hive; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import org.apache.commons.collections4.ListUtils; +import org.apache.iceberg.AssertHelpers; +import org.apache.iceberg.catalog.TableIdentifier; +import org.junit.BeforeClass; +import org.junit.Test; + +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_ICEBERG_ALLOW_DATAFILES_IN_TABLE_LOCATION_ONLY; + +public class TestHiveIcebergRestrictDataFiles extends HiveIcebergStorageHandlerWithEngineBase { + + @BeforeClass + public static void beforeClass() { + shell = HiveIcebergStorageHandlerTestUtils.shell( + Collections.singletonMap(HIVE_ICEBERG_ALLOW_DATAFILES_IN_TABLE_LOCATION_ONLY.varname, "true")); + } + + @Test + public void testRestrictDataFiles() throws IOException, InterruptedException { + TableIdentifier table1 = TableIdentifier.of("default", "tab1"); + testTables.createTableWithVersions(shell, table1.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, + fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 2); + + AssertHelpers.assertThrows("Should throw exception since there are files outside the table directory", + IllegalArgumentException.class, "The table contains paths which are outside the table location", + () -> shell.executeStatement("SELECT * FROM " + table1.name())); + + // Create another table with files within the table location + TableIdentifier table2 = TableIdentifier.of("default", "tab2"); + testTables.createTableWithVersions(shell, table2.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, + fileFormat, null, 0); + + shell.executeStatement( + testTables.getInsertQuery(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, table2, false)); + + List result = shell.executeStatement("SELECT * FROM " + table2.name()); + + HiveIcebergTestUtils.validateData(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, + HiveIcebergTestUtils.valueForRow(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, result), 0); + + // Insert some more records to generate new Data file + shell.executeStatement( + testTables.getInsertQuery(HiveIcebergStorageHandlerTestUtils.OTHER_CUSTOMER_RECORDS_1, table2, false)); + + result = shell.executeStatement("SELECT * FROM " + table2.name()); + + HiveIcebergTestUtils.validateData(ListUtils.union(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, + HiveIcebergStorageHandlerTestUtils.OTHER_CUSTOMER_RECORDS_1), + HiveIcebergTestUtils.valueForRow(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, result), 0); + } +} diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStatistics.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStatistics.java index d31ebcf80d1e..f377b523be5f 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStatistics.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStatistics.java @@ -81,7 +81,7 @@ public void testAnalyzeTableComputeStatisticsEmptyTable() throws IOException, TE public void testStatsWithInsert() { TableIdentifier identifier = TableIdentifier.of("default", "customers"); - shell.setHiveSessionValue(HiveConf.ConfVars.HIVESTATSAUTOGATHER.varname, true); + shell.setHiveSessionValue(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER.varname, true); testTables.createTable(shell, identifier.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, PartitionSpec.unpartitioned(), fileFormat, ImmutableList.of()); 
@@ -107,7 +107,7 @@ public void testStatsWithInsert() { public void testStatsWithInsertOverwrite() { TableIdentifier identifier = TableIdentifier.of("default", "customers"); - shell.setHiveSessionValue(HiveConf.ConfVars.HIVESTATSAUTOGATHER.varname, true); + shell.setHiveSessionValue(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER.varname, true); testTables.createTable(shell, identifier.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, PartitionSpec.unpartitioned(), fileFormat, ImmutableList.of()); @@ -125,7 +125,7 @@ public void testStatsWithPartitionedInsert() { PartitionSpec spec = PartitionSpec.builderFor(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA) .identity("last_name").build(); - shell.setHiveSessionValue(HiveConf.ConfVars.HIVESTATSAUTOGATHER.varname, true); + shell.setHiveSessionValue(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER.varname, true); testTables.createTable(shell, identifier.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, spec, fileFormat, ImmutableList.of()); @@ -150,7 +150,7 @@ public void testStatsWithCTAS() { shell.executeStatement(testTables.getInsertQuery( HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, TableIdentifier.of("default", "source"), false)); - shell.setHiveSessionValue(HiveConf.ConfVars.HIVESTATSAUTOGATHER.varname, true); + shell.setHiveSessionValue(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER.varname, true); shell.executeStatement(String.format( "CREATE TABLE target STORED BY ICEBERG %s %s AS SELECT * FROM source", testTables.locationForCreateTableSQL(TableIdentifier.of("default", "target")), @@ -169,7 +169,7 @@ public void testStatsWithPartitionedCTAS() { shell.executeStatement(testTables.getInsertQuery( HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, TableIdentifier.of("default", "source"), false)); - shell.setHiveSessionValue(HiveConf.ConfVars.HIVESTATSAUTOGATHER.varname, true); + shell.setHiveSessionValue(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER.varname, true); shell.executeStatement(String.format( "CREATE TABLE target PARTITIONED BY (dept, name) STORED BY ICEBERG %s AS SELECT * FROM source s", testTables.propertiesForCreateTableSQL( @@ -189,7 +189,7 @@ public void testStatsRemoved() throws IOException { TableIdentifier identifier = TableIdentifier.of("default", "customers"); - shell.setHiveSessionValue(HiveConf.ConfVars.HIVESTATSAUTOGATHER.varname, true); + shell.setHiveSessionValue(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER.varname, true); testTables.createTable(shell, identifier.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, PartitionSpec.unpartitioned(), fileFormat, ImmutableList.of()); @@ -215,7 +215,7 @@ public void testStatsRemoved() throws IOException { public void testColumnStatsAccurate() throws Exception { TableIdentifier identifier = TableIdentifier.of("default", "customers"); - shell.setHiveSessionValue(HiveConf.ConfVars.HIVESTATSAUTOGATHER.varname, true); + shell.setHiveSessionValue(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER.varname, true); testTables.createTable(shell, identifier.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, PartitionSpec.unpartitioned(), fileFormat, ImmutableList.of()); @@ -236,7 +236,7 @@ public void testColumnStatsAccurate() throws Exception { public void testMergeStatsWithInsert() { TableIdentifier identifier = TableIdentifier.of("default", "customers"); - shell.setHiveSessionValue(HiveConf.ConfVars.HIVESTATSAUTOGATHER.varname, true); + shell.setHiveSessionValue(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER.varname, true); testTables.createTable(shell, identifier.name(), 
HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, PartitionSpec.unpartitioned(), fileFormat, ImmutableList.of()); @@ -267,7 +267,7 @@ public void testMergeStatsWithInsert() { public void testIcebergColStatsPath() throws IOException { TableIdentifier identifier = TableIdentifier.of("default", "customers"); - shell.setHiveSessionValue(HiveConf.ConfVars.HIVESTATSAUTOGATHER.varname, true); + shell.setHiveSessionValue(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER.varname, true); Table table = testTables.createTable(shell, identifier.name(), HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, PartitionSpec.unpartitioned(), fileFormat, ImmutableList.of()); diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerNoScan.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerNoScan.java index 48da53e2b685..4995d795912d 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerNoScan.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerNoScan.java @@ -75,6 +75,7 @@ import org.apache.iceberg.relocated.com.google.common.collect.ImmutableSet; import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.apache.iceberg.relocated.com.google.common.collect.Maps; +import org.apache.iceberg.relocated.com.google.common.collect.Sets; import org.apache.iceberg.relocated.com.google.common.util.concurrent.MoreExecutors; import org.apache.iceberg.types.Type; import org.apache.iceberg.types.Types; @@ -1978,7 +1979,7 @@ public void checkIcebergTableLocation() throws TException, InterruptedException, String dBName = "testdb"; String tableName = "tbl"; String dbWithSuffix = "/" + dBName + ".db"; - String dbManagedLocation = shell.getHiveConf().get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname) + dbWithSuffix; + String dbManagedLocation = shell.getHiveConf().get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname) + dbWithSuffix; String dbExternalLocation = shell.getHiveConf().get(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL.varname) + dbWithSuffix; Path noExistedTblPath = new Path(dbManagedLocation + "/" + tableName); @@ -2054,7 +2055,87 @@ public void testSnycProperties() throws TException, InterruptedException { Assert.assertEquals(icePros.get(TableProperties.MERGE_MODE), hmsProps.get(TableProperties.MERGE_MODE)); } + @Test + public void testCreateTableWithIdentifierField() { + TableIdentifier identifier = TableIdentifier.of("default", "customers"); + String query = String.format("CREATE EXTERNAL TABLE customers (" + + "customer_id BIGINT primary key disable novalidate, " + + "first_name STRING, " + + "last_name STRING) " + + "STORED BY iceBerg %s TBLPROPERTIES ('%s'='%s')", + testTables.locationForCreateTableSQL(identifier), + InputFormatConfig.CATALOG_NAME, + testTables.catalogName()); + shell.executeStatement(query); + org.apache.iceberg.Table table = testTables.loadTable(identifier); + Assert.assertEquals("Should have new identifier field", + Sets.newHashSet(table.schema().findField("customer_id").fieldId()), table.schema().identifierFieldIds()); + } + + @Test + public void testCreateTableWithMultiIdentifierFields() { + TableIdentifier identifier = TableIdentifier.of("default", "customers"); + String query = String.format("CREATE EXTERNAL TABLE customers (" + + "customer_id BIGINT," + + "first_name STRING, " + + "last_name STRING," + + "primary key (customer_id, first_name) disable novalidate) " + + 
"STORED BY iceBerg %s TBLPROPERTIES ('%s'='%s')", + testTables.locationForCreateTableSQL(identifier), + InputFormatConfig.CATALOG_NAME, + testTables.catalogName()); + shell.executeStatement(query); + org.apache.iceberg.Table table = testTables.loadTable(identifier); + Assert.assertEquals("Should have new two identifier fields", + Sets.newHashSet(table.schema().findField("customer_id").fieldId(), + table.schema().findField("first_name").fieldId()), table.schema().identifierFieldIds()); + } + + @Test + public void testCreateTableFailedWithNestedIdentifierField() { + TableIdentifier identifier = TableIdentifier.of("default", "customers"); + String query = String.format("CREATE EXTERNAL TABLE customers_with_nested_column (" + + "customer_id BIGINT," + + "first_name STRING, " + + "last_name STRING, " + + "user_info STRUCT primary key disable novalidate) " + + "STORED BY iceBerg %s TBLPROPERTIES ('%s'='%s')", + testTables.locationForCreateTableSQL(identifier), + InputFormatConfig.CATALOG_NAME, + testTables.catalogName()); + + // Iceberg table doesn't support nested column as identifier field. + Assert.assertThrows( + "Cannot add field user_info as an identifier field: not a primitive type field", + IllegalArgumentException.class, () -> shell.executeStatement(query)); + } + private String getCurrentSnapshotForHiveCatalogTable(org.apache.iceberg.Table icebergTable) { return ((BaseMetastoreTableOperations) ((BaseTable) icebergTable).operations()).currentMetadataLocation(); } + + @Test + public void testCreateTableWithPercentInName() throws IOException { + Assume.assumeTrue("This test is only for hive catalog", testTableType == TestTables.TestTableType.HIVE_CATALOG); + + TableIdentifier identifier = TableIdentifier.of("default", "[|]#&%_@"); + + shell.executeStatement("CREATE EXTERNAL TABLE `[|]#&%_@` " + + "STORED BY ICEBERG " + + testTables.locationForCreateTableSQL(identifier) + + "TBLPROPERTIES ('" + InputFormatConfig.TABLE_SCHEMA + "'='" + + SchemaParser.toJson(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA) + "', " + + "'" + InputFormatConfig.PARTITION_SPEC + "'='" + + PartitionSpecParser.toJson(PartitionSpec.unpartitioned()) + "', " + + "'dummy'='test', " + + "'" + InputFormatConfig.EXTERNAL_TABLE_PURGE + "'='TRUE', " + + "'" + InputFormatConfig.CATALOG_NAME + "'='" + testTables.catalogName() + "')"); + + // Check the Iceberg table data + org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier); + Assert.assertEquals( + HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA.asStruct(), + icebergTable.schema().asStruct()); + Assert.assertEquals(PartitionSpec.unpartitioned(), icebergTable.spec()); + } } diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveShell.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveShell.java index 79e477bbe59f..4898ce813383 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveShell.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestHiveShell.java @@ -85,9 +85,9 @@ public void setHiveSessionValue(String key, boolean value) { public void start() { // Create a copy of the HiveConf for the metastore metastore.start(new HiveConf(hs2Conf), 20); - hs2Conf.setVar(HiveConf.ConfVars.METASTOREURIS, metastore.hiveConf().getVar(HiveConf.ConfVars.METASTOREURIS)); - hs2Conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, - metastore.hiveConf().getVar(HiveConf.ConfVars.METASTOREWAREHOUSE)); + hs2Conf.setVar(HiveConf.ConfVars.METASTORE_URIS, 
metastore.hiveConf().getVar(HiveConf.ConfVars.METASTORE_URIS)); + hs2Conf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, + metastore.hiveConf().getVar(HiveConf.ConfVars.METASTORE_WAREHOUSE)); hs2Conf.setVar(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL, metastore.hiveConf().getVar(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL)); @@ -195,22 +195,21 @@ private HiveConf initializeConf() { // Switch off optimizers in order to contain the map reduction within this JVM hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_CBO_ENABLED, true); hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_INFER_BUCKET_SORT, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEMETADATAONLYQUERIES, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESKEWJOIN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_METADATA_ONLY_QUERIES, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPT_INDEX_FILTER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SKEW_JOIN, false); // Speed up test execution - hiveConf.setLongVar(HiveConf.ConfVars.HIVECOUNTERSPULLINTERVAL, 1L); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); + hiveConf.setLongVar(HiveConf.ConfVars.HIVE_COUNTERS_PULL_INTERVAL, 1L); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); // Resource configuration hiveConf.setInt("mapreduce.map.memory.mb", 1024); // Tez configuration hiveConf.setBoolean("tez.local.mode", true); - // TODO: enable below option once HIVE-26445 is investigated - // hiveConf.setBoolean("tez.local.mode.without.network", true); + hiveConf.setBoolean("tez.local.mode.without.network", true); // Disable vectorization for HiveIcebergInputFormat hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false); diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestOptimisticRetry.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestOptimisticRetry.java index cd4aa88e7807..c9d0bf7e3bc1 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestOptimisticRetry.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/TestOptimisticRetry.java @@ -55,7 +55,7 @@ public void testConcurrentOverlappingUpdates() { .run(i -> { init(shell, testTables, temp, executionEngine); HiveConf.setBoolVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, isVectorized); - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES, RETRY_STRATEGIES); shell.executeStatement(sql); @@ -89,7 +89,7 @@ public void testNonOverlappingConcurrent2Updates() { .run(i -> { init(shell, testTables, temp, executionEngine); HiveConf.setBoolVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, isVectorized); - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES, RETRY_STRATEGIES); shell.executeStatement(sql[i]); @@ -126,7 +126,7 @@ public void testConcurrent2MergeInserts() { .run(i -> { 
init(shell, testTables, temp, executionEngine); HiveConf.setBoolVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, isVectorized); - HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); HiveConf.setVar(shell.getHiveConf(), HiveConf.ConfVars.HIVE_QUERY_REEXECUTION_STRATEGIES, RETRY_STRATEGIES); shell.executeStatement(sql); diff --git a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/writer/TestHiveIcebergDeleteWriter.java b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/writer/TestHiveIcebergDeleteWriter.java index 9cac3a02620b..a2b6ee5fe63d 100644 --- a/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/writer/TestHiveIcebergDeleteWriter.java +++ b/iceberg/iceberg-handler/src/test/java/org/apache/iceberg/mr/hive/writer/TestHiveIcebergDeleteWriter.java @@ -20,6 +20,7 @@ package org.apache.iceberg.mr.hive.writer; import java.io.IOException; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.List; @@ -35,6 +36,7 @@ import org.apache.iceberg.mr.mapred.Container; import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.apache.iceberg.relocated.com.google.common.collect.Sets; +import org.apache.iceberg.util.CharSequenceSet; import org.apache.iceberg.util.StructLikeSet; import org.junit.Assert; import org.junit.Test; @@ -54,18 +56,24 @@ public void testDelete() throws IOException { Collections.sort(deleteRecords, Comparator.comparing(a -> a.getField(MetadataColumns.PARTITION_COLUMN_NAME).toString())); + CharSequenceSet expectedDataFiles = CharSequenceSet.empty(); Container container = new Container<>(); for (Record deleteRecord : deleteRecords) { container.set(deleteRecord); testWriter.write(container); + expectedDataFiles.add((String) deleteRecord.getField(MetadataColumns.FILE_PATH.name())); } testWriter.close(false); RowDelta rowDelta = table.newRowDelta(); testWriter.files().deleteFiles().forEach(rowDelta::addDeletes); + Collection actualDataFiles = testWriter.files().referencedDataFiles(); rowDelta.commit(); + Assert.assertTrue("Actual :" + actualDataFiles + " Expected: " + expectedDataFiles, + actualDataFiles.containsAll(expectedDataFiles)); + StructLikeSet expected = rowSetWithoutIds(RECORDS, DELETED_IDS); StructLikeSet actual = actualRowSet(table); diff --git a/iceberg/iceberg-handler/src/test/queries/positive/delete_iceberg_copy_on_write_unpartitioned.q b/iceberg/iceberg-handler/src/test/queries/positive/delete_iceberg_copy_on_write_unpartitioned.q index cf192789234a..815207398237 100644 --- a/iceberg/iceberg-handler/src/test/queries/positive/delete_iceberg_copy_on_write_unpartitioned.q +++ b/iceberg/iceberg-handler/src/test/queries/positive/delete_iceberg_copy_on_write_unpartitioned.q @@ -38,3 +38,13 @@ explain delete from tbl_ice where a in (select t1.a from tbl_ice t1 join tbl_sta delete from tbl_ice where a in (select t1.a from tbl_ice t1 join tbl_standard_other t2 on t1.a = t2.a); select count(*) from tbl_ice; -- 0 + +-- null cases +drop table if exists tbl_ice_with_nulls; +create table tbl_ice_with_nulls (id int, name string) stored by iceberg tblproperties('format-version'='2', 'write.delete.mode'='copy-on-write'); + +insert into tbl_ice_with_nulls values +(1, 'ABC'),(2, 'CBS'),(3, null),(4, 'POPI'),(5, 'AQWR'),(6, 'POIU'),(7, 'SDF'),(9, null),(8,'POIKL'),(10, 'YUIO'); + +delete from tbl_ice_with_nulls where id 
in (select id from tbl_ice_with_nulls where id > 9) or name in (select name from tbl_ice_with_nulls where name = 'sdf'); +select * from tbl_ice_with_nulls order by id; \ No newline at end of file diff --git a/iceberg/iceberg-handler/src/test/queries/positive/iceberg_copy_on_write.q b/iceberg/iceberg-handler/src/test/queries/positive/delete_iceberg_mixed.q similarity index 88% rename from iceberg/iceberg-handler/src/test/queries/positive/iceberg_copy_on_write.q rename to iceberg/iceberg-handler/src/test/queries/positive/delete_iceberg_mixed.q index 3bfe3dee23f4..bbad2f380e35 100644 --- a/iceberg/iceberg-handler/src/test/queries/positive/iceberg_copy_on_write.q +++ b/iceberg/iceberg-handler/src/test/queries/positive/delete_iceberg_mixed.q @@ -5,7 +5,6 @@ --! qt:replace:/("total-files-size":")\d+/$1#FileSize#/ --! qt:replace:/("removed-files-size":")\d+/$1#FileSize#/ - -- create an unpartitioned table with skip delete data set to false create table ice01 (id int, name string) Stored by Iceberg stored as ORC TBLPROPERTIES('format-version'='2'); @@ -17,7 +16,6 @@ insert into ice01 values (1, 'ABC'),(2, 'CBS'),(3, null),(4, 'POPI'),(5, 'AQWR') -- delete using MOR delete from ice01 where id>9 OR id=8; - select * from ice01; -- should be 2 files, one data file and one positional delete file. @@ -34,21 +32,24 @@ select * from ice01; -- should be only one data file. select summary from default.ice01.snapshots; -select * from ice01; +-- null cases -delete from ice01 where name=null; +delete from ice01 where null; +select * from ice01; +delete from ice01 where not null; select * from ice01; -delete from ice01 where name!=null; +delete from ice01 where name = null; +select * from ice01; +delete from ice01 where name != null; select * from ice01; --disable cbo due to HIVE-27070 set hive.cbo.enable=false; delete from ice01 where name is null; - select * from ice01; -- clean up diff --git a/iceberg/iceberg-handler/src/test/queries/positive/delete_iceberg_unpartitioned_parquet.q b/iceberg/iceberg-handler/src/test/queries/positive/delete_iceberg_unpartitioned_parquet.q index c0430591e3ca..89c2822374f8 100644 --- a/iceberg/iceberg-handler/src/test/queries/positive/delete_iceberg_unpartitioned_parquet.q +++ b/iceberg/iceberg-handler/src/test/queries/positive/delete_iceberg_unpartitioned_parquet.q @@ -28,3 +28,13 @@ insert into tbl_standard_other values (10, 'ten'), (444, 'tutu'); delete from tbl_ice where a in (select t1.a from tbl_ice t1 join tbl_standard_other t2 on t1.a = t2.a); select count(*) from tbl_ice; -- 0 + +-- null cases +drop table if exists tbl_ice_with_nulls; +create table tbl_ice_with_nulls (id int, name string) stored by iceberg tblproperties('format-version'='2'); + +insert into tbl_ice_with_nulls values +(1, 'ABC'),(2, 'CBS'),(3, null),(4, 'POPI'),(5, 'AQWR'),(6, 'POIU'),(7, 'SDF'),(9, null),(8,'POIKL'),(10, 'YUIO'); + +delete from tbl_ice_with_nulls where id in (select id from tbl_ice_with_nulls where id > 9) or name in (select name from tbl_ice_with_nulls where name = 'sdf'); +select * from tbl_ice_with_nulls order by id; \ No newline at end of file diff --git a/iceberg/iceberg-handler/src/test/queries/positive/dynamic_partition_writes.q b/iceberg/iceberg-handler/src/test/queries/positive/dynamic_partition_writes.q index 85063e2b095a..54e46a2a2609 100644 --- a/iceberg/iceberg-handler/src/test/queries/positive/dynamic_partition_writes.q +++ b/iceberg/iceberg-handler/src/test/queries/positive/dynamic_partition_writes.q @@ -8,6 +8,16 @@ drop table if exists tbl_target_identity; drop table if 
exists tbl_target_bucket; drop table if exists tbl_target_mixed; drop table if exists tbl_bucket_date; +drop table if exists tbl_target_truncate_str; +drop table if exists tbl_target_truncate_int; +drop table if exists tbl_target_truncate_bigint; +drop table if exists tbl_year_date; +drop table if exists tbl_year_timestamp; +drop table if exists tbl_month_date; +drop table if exists tbl_month_timestamp; +drop table if exists tbl_day_date; +drop table if exists tbl_day_timestamp; +drop table if exists tbl_hour_timestamp; create external table tbl_src (a int, b string, c bigint) stored by iceberg stored as orc; insert into tbl_src values (1, 'EUR', 10), (2, 'EUR', 10), (3, 'USD', 11), (4, 'EUR', 12), (5, 'HUF', 30), (6, 'USD', 10), (7, 'USD', 100), (8, 'PLN', 20), (9, 'PLN', 11), (10, 'CZK', 5), (12, NULL, NULL); @@ -53,4 +63,92 @@ tblproperties ('parquet.compression'='snappy','format-version'='2'); insert into tbl_bucket_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018); update tbl_bucket_date set date_time_date = '2018-07-02' where date_time_date = '2018-07-03'; -select count(*) from tbl_bucket_date where date_time_date = '2018-07-02'; +select count(*) from tbl_bucket_date where date_time_date = '2018-07-02'; + +--truncate case - should invoke GenericUDFIcebergTruncate to truncate the column value and use for clustering and sorting +create external table tbl_target_truncate_str (a int, ccy string) partitioned by spec (truncate(2, ccy)) stored by iceberg stored as orc; +explain insert into table tbl_target_truncate_str select a, b from tbl_src; +insert into table tbl_target_truncate_str select a, b from tbl_src; +select * from tbl_target_truncate_str order by a, ccy; + +create external table tbl_target_truncate_int (id int, ccy string) partitioned by spec (truncate(2, id)) stored by iceberg stored as orc; +explain insert into table tbl_target_truncate_int select a, b from tbl_src; +insert into table tbl_target_truncate_int select a, b from tbl_src; +select * from tbl_target_truncate_int order by id, ccy; + +create external table tbl_target_truncate_bigint (a int, ccy bigint) partitioned by spec (truncate(2, ccy)) stored by iceberg stored as orc; +explain insert into table tbl_target_truncate_bigint select a, c from tbl_src; +insert into table tbl_target_truncate_bigint select a, c from tbl_src; +select * from tbl_target_truncate_bigint order by a, ccy; + +create external table tbl_target_truncate_decimal (a int, b string, ccy decimal(10,6)) partitioned by spec (truncate(2, b), truncate(3, ccy)) stored by iceberg stored as orc; +explain insert into table tbl_target_truncate_decimal select a, b, 1.567894 from tbl_src; +insert into table tbl_target_truncate_decimal select a, b, 1.567894 from tbl_src; +select * from tbl_target_truncate_decimal order by a, b; + +--year case - should invoke GenericUDFIcebergYear to convert the date/timestamp value to year and use for clustering and sorting +create external table tbl_year_date (id string, date_time_date date, year_partition int) + partitioned by spec (year_partition, year(date_time_date)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2'); + +explain insert into tbl_year_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018); +insert into tbl_year_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018); +select * from tbl_year_date order by id, date_time_date; + 
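+
+-- A minimal sketch for illustration (assuming tbl_src and tbl_year_date as created above,
+-- and that Iceberg's truncate transform keeps the first W characters of a string and rounds
+-- integral values down to the nearest multiple of W, while the year transform keys rows by
+-- calendar year): the clustering keys derived via GenericUDFIcebergTruncate/GenericUDFIcebergYear
+-- should roughly correspond to these plain HiveQL expressions.
+select a, substr(b, 1, 2) as ccy_trunc_2, c - (c % 2) as c_trunc_2 from tbl_src;
+select id, year(date_time_date) as year_key from tbl_year_date;
+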
+create external table tbl_year_timestamp (id string, date_time_timestamp timestamp, year_partition int) + partitioned by spec (year_partition, year(date_time_timestamp)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2'); + +explain insert into tbl_year_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018); +insert into tbl_year_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018); +select * from tbl_year_timestamp order by id, date_time_timestamp; + +--month case - should invoke GenericUDFIcebergMonth to convert the date/timestamp value to month and use for clustering and sorting +create external table tbl_month_date (id string, date_time_date date, year_partition int) + partitioned by spec (year_partition, month(date_time_date)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2'); + +explain insert into tbl_month_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018); +insert into tbl_month_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018); +select * from tbl_month_date order by id, date_time_date; + +create external table tbl_month_timestamp (id string, date_time_timestamp timestamp, year_partition int) + partitioned by spec (year_partition, month(date_time_timestamp)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2'); + +explain insert into tbl_month_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018); +insert into tbl_month_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018); +select * from tbl_month_timestamp order by id, date_time_timestamp; + +--day case - should invoke GenericUDFIcebergDay to convert the date/timestamp value to day and use for clustering and sorting +create external table tbl_day_date (id string, date_time_date date, year_partition int) + partitioned by spec (year_partition, day(date_time_date)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2'); + +explain insert into tbl_day_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018); +insert into tbl_day_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018); +select * from tbl_day_date order by id, date_time_date; + +create external table tbl_day_timestamp (id string, date_time_timestamp timestamp, year_partition int) + partitioned by spec (year_partition, day(date_time_timestamp)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2'); + +explain insert into tbl_day_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018); +insert into tbl_day_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018); +select * from tbl_day_timestamp order by id, date_time_timestamp; + +--hour case - should invoke GenericUDFIcebergHour to convert the date/timestamp value to hour and use for clustering and sorting +create external table 
tbl_hour_timestamp (id string, date_time_timestamp timestamp, year_partition int) + partitioned by spec (year_partition, hour(date_time_timestamp)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2'); + +explain insert into tbl_hour_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018); +insert into tbl_hour_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018); +select * from tbl_hour_timestamp order by id, date_time_timestamp; \ No newline at end of file diff --git a/iceberg/iceberg-handler/src/test/queries/positive/iceberg_major_compaction_partition_evolution.q b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_major_compaction_partition_evolution.q new file mode 100644 index 000000000000..67b44d625a55 --- /dev/null +++ b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_major_compaction_partition_evolution.q @@ -0,0 +1,65 @@ +-- SORT_QUERY_RESULTS +-- Mask neededVirtualColumns due to non-strict order +--! qt:replace:/(\s+neededVirtualColumns:\s)(.*)/$1#Masked#/ +-- Mask the totalSize value as it can have slight variability, causing test flakiness +--! qt:replace:/(\s+totalSize\s+)\S+(\s+)/$1#Masked#$2/ +-- Mask random uuid +--! qt:replace:/(\s+uuid\s+)\S+(\s*)/$1#Masked#$2/ +-- Mask a random snapshot id +--! qt:replace:/(\s+current-snapshot-id\s+)\S+(\s*)/$1#Masked#/ +-- Mask added file size +--! qt:replace:/(\S\"added-files-size\\\":\\\")(\d+)(\\\")/$1#Masked#$3/ +-- Mask total file size +--! qt:replace:/(\S\"total-files-size\\\":\\\")(\d+)(\\\")/$1#Masked#$3/ +-- Mask current-snapshot-timestamp-ms +--! qt:replace:/(\s+current-snapshot-timestamp-ms\s+)\S+(\s*)/$1#Masked#$2/ +--! qt:replace:/(MAJOR\s+succeeded\s+)[a-zA-Z0-9\-\.\s+]+(\s+manual)/$1#Masked#$2/ +-- Mask compaction id as they will be allocated in parallel threads +--! 
qt:replace:/^[0-9]/#Masked#/ + +set hive.llap.io.enabled=true; +set hive.vectorized.execution.enabled=true; +set hive.optimize.shared.work.merge.ts.schema=true; + +create table ice_orc ( + first_name string, + last_name string, + dept_id bigint, + team_id bigint + ) +partitioned by (company_id bigint) +stored by iceberg stored as orc +tblproperties ('format-version'='2'); + +insert into ice_orc VALUES ('fn1','ln1', 1, 10, 100); +insert into ice_orc VALUES ('fn2','ln2', 1, 10, 100); +insert into ice_orc VALUES ('fn3','ln3', 1, 11, 100); +alter table ice_orc set partition spec(company_id, dept_id); +insert into ice_orc VALUES ('fn4','ln4', 1, 11, 100); +insert into ice_orc VALUES ('fn5','ln5', 2, 20, 100); +insert into ice_orc VALUES ('fn6','ln6', 2, 20, 100); +alter table ice_orc set partition spec(company_id, dept_id, team_id); +insert into ice_orc VALUES ('fn7','ln7', 2, 21, 100); +insert into ice_orc VALUES ('fn8','ln8', 2, 21, 100); + +update ice_orc set last_name = 'ln1a' where first_name='fn1'; +update ice_orc set last_name = 'ln2a' where first_name='fn2'; +update ice_orc set last_name = 'ln3a' where first_name='fn3'; +update ice_orc set last_name = 'ln4a' where first_name='fn4'; +alter table ice_orc set partition spec(company_id, dept_id); +update ice_orc set last_name = 'ln5a' where first_name='fn5'; +update ice_orc set last_name = 'ln6a' where first_name='fn6'; +update ice_orc set last_name = 'ln7a' where first_name='fn7'; +update ice_orc set last_name = 'ln8a' where first_name='fn8'; + +delete from ice_orc where last_name in ('ln1a', 'ln8a'); + +select * from ice_orc; +describe formatted ice_orc; + +explain alter table ice_orc COMPACT 'major' and wait; +alter table ice_orc COMPACT 'major' and wait; + +select * from ice_orc; +describe formatted ice_orc; +show compactions; \ No newline at end of file diff --git a/iceberg/iceberg-handler/src/test/queries/positive/iceberg_major_compaction_partitioned.q b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_major_compaction_partitioned.q new file mode 100644 index 000000000000..4b8dc30bdaa3 --- /dev/null +++ b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_major_compaction_partitioned.q @@ -0,0 +1,91 @@ +-- SORT_QUERY_RESULTS +-- Mask neededVirtualColumns due to non-strict order +--! qt:replace:/(\s+neededVirtualColumns:\s)(.*)/$1#Masked#/ +-- Mask the totalSize value as it can have slight variability, causing test flakiness +--! qt:replace:/(\s+totalSize\s+)\S+(\s+)/$1#Masked#$2/ +-- Mask random uuid +--! qt:replace:/(\s+uuid\s+)\S+(\s*)/$1#Masked#$2/ +-- Mask a random snapshot id +--! qt:replace:/(\s+current-snapshot-id\s+)\S+(\s*)/$1#Masked#/ +-- Mask added file size +--! qt:replace:/(\S\"added-files-size\\\":\\\")(\d+)(\\\")/$1#Masked#$3/ +-- Mask total file size +--! qt:replace:/(\S\"total-files-size\\\":\\\")(\d+)(\\\")/$1#Masked#$3/ +-- Mask current-snapshot-timestamp-ms +--! qt:replace:/(\s+current-snapshot-timestamp-ms\s+)\S+(\s*)/$1#Masked#$2/ +--! qt:replace:/(MAJOR\s+succeeded\s+)[a-zA-Z0-9\-\.\s+]+(\s+manual)/$1#Masked#$2/ +-- Mask compaction id as they will be allocated in parallel threads +--! 
qt:replace:/^[0-9]/#Masked#/ + +set hive.llap.io.enabled=true; +set hive.vectorized.execution.enabled=true; +set hive.optimize.shared.work.merge.ts.schema=true; + +create table ice_orc ( + first_name string, + last_name string + ) +partitioned by (dept_id bigint) +stored by iceberg stored as orc +tblproperties ('format-version'='2'); + +insert into ice_orc VALUES ('fn1','ln1', 1); +insert into ice_orc VALUES ('fn2','ln2', 1); +insert into ice_orc VALUES ('fn3','ln3', 1); +insert into ice_orc VALUES ('fn4','ln4', 1); +insert into ice_orc VALUES ('fn5','ln5', 2); +insert into ice_orc VALUES ('fn6','ln6', 2); +insert into ice_orc VALUES ('fn7','ln7', 2); + +update ice_orc set last_name = 'ln1a' where first_name='fn1'; +update ice_orc set last_name = 'ln2a' where first_name='fn2'; +update ice_orc set last_name = 'ln3a' where first_name='fn3'; +update ice_orc set last_name = 'ln4a' where first_name='fn4'; +update ice_orc set last_name = 'ln5a' where first_name='fn5'; +update ice_orc set last_name = 'ln6a' where first_name='fn6'; +update ice_orc set last_name = 'ln7a' where first_name='fn7'; + +delete from ice_orc where last_name in ('ln1a', 'ln2a', 'ln7a'); + +select * from ice_orc; +describe formatted ice_orc; + +explain alter table ice_orc COMPACT 'major' and wait; +alter table ice_orc COMPACT 'major' and wait; + +select * from ice_orc; +describe formatted ice_orc; +show compactions; + +-- Starting second set of inserts/updates/deletes and calling compaction at the end +-- to check that subsequent compaction works + +insert into ice_orc VALUES ('fn11','ln11', 1); +insert into ice_orc VALUES ('fn12','ln12', 1); +insert into ice_orc VALUES ('fn13','ln13', 1); +insert into ice_orc VALUES ('fn14','ln14', 1); +insert into ice_orc VALUES ('fn15','ln15', 2); +insert into ice_orc VALUES ('fn16','ln16', 2); +insert into ice_orc VALUES ('fn17','ln17', 2); +insert into ice_orc VALUES ('fn18','ln18', 2); + +update ice_orc set last_name = 'ln11a' where first_name='fn11'; +update ice_orc set last_name = 'ln12a' where first_name='fn12'; +update ice_orc set last_name = 'ln13a' where first_name='fn13'; +update ice_orc set last_name = 'ln14a' where first_name='fn14'; +update ice_orc set last_name = 'ln15a' where first_name='fn15'; +update ice_orc set last_name = 'ln16a' where first_name='fn16'; +update ice_orc set last_name = 'ln17a' where first_name='fn17'; +update ice_orc set last_name = 'ln18a' where first_name='fn18'; + +delete from ice_orc where last_name in ('ln11a', 'ln12a', 'ln17a', 'ln18a'); + +select * from ice_orc; +describe formatted ice_orc; + +explain alter table ice_orc COMPACT 'major' and wait; +alter table ice_orc COMPACT 'major' and wait; + +select * from ice_orc; +describe formatted ice_orc; +show compactions; diff --git a/iceberg/iceberg-handler/src/test/queries/positive/iceberg_major_compaction_schema_evolution.q b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_major_compaction_schema_evolution.q new file mode 100644 index 000000000000..939904a8cba1 --- /dev/null +++ b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_major_compaction_schema_evolution.q @@ -0,0 +1,66 @@ +-- SORT_QUERY_RESULTS +-- Mask neededVirtualColumns due to non-strict order +--! qt:replace:/(\s+neededVirtualColumns:\s)(.*)/$1#Masked#/ +-- Mask the totalSize value as it can have slight variability, causing test flakiness +--! qt:replace:/(\s+totalSize\s+)\S+(\s+)/$1#Masked#$2/ +-- Mask random uuid +--! qt:replace:/(\s+uuid\s+)\S+(\s*)/$1#Masked#$2/ +-- Mask a random snapshot id +--! 
qt:replace:/(\s+current-snapshot-id\s+)\S+(\s*)/$1#Masked#/ +-- Mask added file size +--! qt:replace:/(\S\"added-files-size\\\":\\\")(\d+)(\\\")/$1#Masked#$3/ +-- Mask total file size +--! qt:replace:/(\S\"total-files-size\\\":\\\")(\d+)(\\\")/$1#Masked#$3/ +-- Mask current-snapshot-timestamp-ms +--! qt:replace:/(\s+current-snapshot-timestamp-ms\s+)\S+(\s*)/$1#Masked#$2/ +--! qt:replace:/(MAJOR\s+succeeded\s+)[a-zA-Z0-9\-\.\s+]+(\s+manual)/$1#Masked#$2/ +-- Mask compaction id as they will be allocated in parallel threads +--! qt:replace:/^[0-9]/#Masked#/ + +set hive.llap.io.enabled=true; +set hive.vectorized.execution.enabled=true; +set hive.optimize.shared.work.merge.ts.schema=true; + +create table ice_orc ( + first_name string, + last_name string + ) +partitioned by (dept_id bigint) +stored by iceberg stored as orc +tblproperties ('format-version'='2'); + +insert into ice_orc (first_name, last_name, dept_id) VALUES ('fn1','ln1', 1); +insert into ice_orc (first_name, last_name, dept_id) VALUES ('fn2','ln2', 1); +insert into ice_orc (first_name, last_name, dept_id) VALUES ('fn3','ln3', 1); +insert into ice_orc (first_name, last_name, dept_id) VALUES ('fn4','ln4', 1); + +alter table ice_orc add columns (address string); + +insert into ice_orc (first_name, last_name, address, dept_id) VALUES ('fn5','ln5', 'addr_5', 2); +insert into ice_orc (first_name, last_name, address, dept_id) VALUES ('fn6','ln6', 'addr_6', 2); +insert into ice_orc (first_name, last_name, address, dept_id) VALUES ('fn7','ln7', 'addr_7', 2); +insert into ice_orc (first_name, last_name, address, dept_id) VALUES ('fn8','ln8', 'addr_8', 2); + +update ice_orc set last_name = 'ln1a' where first_name='fn1'; +update ice_orc set last_name = 'ln2a' where first_name='fn2'; +update ice_orc set last_name = 'ln3a' where first_name='fn3'; +update ice_orc set last_name = 'ln4a' where first_name='fn4'; + +alter table ice_orc change first_name fname string; + +update ice_orc set last_name = 'ln5a' where fname='fn5'; +update ice_orc set last_name = 'ln6a' where fname='fn6'; +update ice_orc set last_name = 'ln7a' where fname='fn7'; +update ice_orc set last_name = 'ln8a' where fname='fn8'; + +delete from ice_orc where fname in ('fn1', 'fn3', 'fn7'); + +select * from ice_orc; +describe formatted ice_orc; + +explain alter table ice_orc COMPACT 'major' and wait; +alter table ice_orc COMPACT 'major' and wait; + +select * from ice_orc; +describe formatted ice_orc; +show compactions; \ No newline at end of file diff --git a/iceberg/iceberg-handler/src/test/queries/positive/iceberg_major_compaction_unpartitioned.q b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_major_compaction_unpartitioned.q new file mode 100644 index 000000000000..92efb00767ec --- /dev/null +++ b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_major_compaction_unpartitioned.q @@ -0,0 +1,58 @@ +-- SORT_QUERY_RESULTS +-- Mask neededVirtualColumns due to non-strict order +--! qt:replace:/(\s+neededVirtualColumns:\s)(.*)/$1#Masked#/ +-- Mask the totalSize value as it can have slight variability, causing test flakiness +--! qt:replace:/(\s+totalSize\s+)\S+(\s+)/$1#Masked#$2/ +-- Mask random uuid +--! qt:replace:/(\s+uuid\s+)\S+(\s*)/$1#Masked#$2/ +-- Mask a random snapshot id +--! qt:replace:/(\s+current-snapshot-id\s+)\S+(\s*)/$1#Masked#/ +-- Mask added file size +--! qt:replace:/(\S\"added-files-size\\\":\\\")(\d+)(\\\")/$1#Masked#$3/ +-- Mask total file size +--! 
qt:replace:/(\S\"total-files-size\\\":\\\")(\d+)(\\\")/$1#Masked#$3/ +-- Mask current-snapshot-timestamp-ms +--! qt:replace:/(\s+current-snapshot-timestamp-ms\s+)\S+(\s*)/$1#Masked#$2/ +-- Mask the enqueue time which is based on current time +--! qt:replace:/(MAJOR\s+succeeded\s+)[a-zA-Z0-9\-\.\s+]+(\s+manual)/$1#Masked#$2/ +-- Mask compaction id as they will be allocated in parallel threads +--! qt:replace:/^[0-9]/#Masked#/ + +set hive.llap.io.enabled=true; +set hive.vectorized.execution.enabled=true; +set hive.optimize.shared.work.merge.ts.schema=true; + +create table ice_orc ( + first_name string, + last_name string + ) +stored by iceberg stored as orc +tblproperties ('format-version'='2'); + +insert into ice_orc VALUES ('fn1','ln1'); +insert into ice_orc VALUES ('fn2','ln2'); +insert into ice_orc VALUES ('fn3','ln3'); +insert into ice_orc VALUES ('fn4','ln4'); +insert into ice_orc VALUES ('fn5','ln5'); +insert into ice_orc VALUES ('fn6','ln6'); +insert into ice_orc VALUES ('fn7','ln7'); + +update ice_orc set last_name = 'ln1a' where first_name='fn1'; +update ice_orc set last_name = 'ln2a' where first_name='fn2'; +update ice_orc set last_name = 'ln3a' where first_name='fn3'; +update ice_orc set last_name = 'ln4a' where first_name='fn4'; +update ice_orc set last_name = 'ln5a' where first_name='fn5'; +update ice_orc set last_name = 'ln6a' where first_name='fn6'; +update ice_orc set last_name = 'ln7a' where first_name='fn7'; + +delete from ice_orc where last_name in ('ln5a', 'ln6a', 'ln7a'); + +select * from ice_orc; +describe formatted ice_orc; + +explain alter table ice_orc COMPACT 'major' and wait; +alter table ice_orc COMPACT 'major' and wait; + +select * from ice_orc; +describe formatted ice_orc; +show compactions; \ No newline at end of file diff --git a/iceberg/iceberg-handler/src/test/queries/positive/iceberg_optimize_table_unpartitioned.q b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_optimize_table_unpartitioned.q new file mode 100644 index 000000000000..5fbc108125ed --- /dev/null +++ b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_optimize_table_unpartitioned.q @@ -0,0 +1,58 @@ +-- SORT_QUERY_RESULTS +-- Mask neededVirtualColumns due to non-strict order +--! qt:replace:/(\s+neededVirtualColumns:\s)(.*)/$1#Masked#/ +-- Mask the totalSize value as it can have slight variability, causing test flakiness +--! qt:replace:/(\s+totalSize\s+)\S+(\s+)/$1#Masked#$2/ +-- Mask random uuid +--! qt:replace:/(\s+uuid\s+)\S+(\s*)/$1#Masked#$2/ +-- Mask a random snapshot id +--! qt:replace:/(\s+current-snapshot-id\s+)\S+(\s*)/$1#Masked#/ +-- Mask added file size +--! qt:replace:/(\S\"added-files-size\\\":\\\")(\d+)(\\\")/$1#Masked#$3/ +-- Mask total file size +--! qt:replace:/(\S\"total-files-size\\\":\\\")(\d+)(\\\")/$1#Masked#$3/ +-- Mask current-snapshot-timestamp-ms +--! qt:replace:/(\s+current-snapshot-timestamp-ms\s+)\S+(\s*)/$1#Masked#$2/ +-- Mask the enqueue time which is based on current time +--! qt:replace:/(MAJOR\s+succeeded\s+)[a-zA-Z0-9\-\.\s+]+(\s+manual)/$1#Masked#$2/ +-- Mask compaction id as they will be allocated in parallel threads +--! 
qt:replace:/^[0-9]/#Masked#/ + +set hive.llap.io.enabled=true; +set hive.vectorized.execution.enabled=true; +set hive.optimize.shared.work.merge.ts.schema=true; + +create table ice_orc ( + first_name string, + last_name string + ) +stored by iceberg stored as orc +tblproperties ('format-version'='2'); + +insert into ice_orc VALUES ('fn1','ln1'); +insert into ice_orc VALUES ('fn2','ln2'); +insert into ice_orc VALUES ('fn3','ln3'); +insert into ice_orc VALUES ('fn4','ln4'); +insert into ice_orc VALUES ('fn5','ln5'); +insert into ice_orc VALUES ('fn6','ln6'); +insert into ice_orc VALUES ('fn7','ln7'); + +update ice_orc set last_name = 'ln1a' where first_name='fn1'; +update ice_orc set last_name = 'ln2a' where first_name='fn2'; +update ice_orc set last_name = 'ln3a' where first_name='fn3'; +update ice_orc set last_name = 'ln4a' where first_name='fn4'; +update ice_orc set last_name = 'ln5a' where first_name='fn5'; +update ice_orc set last_name = 'ln6a' where first_name='fn6'; +update ice_orc set last_name = 'ln7a' where first_name='fn7'; + +delete from ice_orc where last_name in ('ln5a', 'ln6a', 'ln7a'); + +select * from ice_orc; +describe formatted ice_orc; + +explain optimize table ice_orc rewrite data; +optimize table ice_orc rewrite data; + +select * from ice_orc; +describe formatted ice_orc; +show compactions; \ No newline at end of file diff --git a/iceberg/iceberg-handler/src/test/queries/positive/iceberg_partition_vectorized_read.q b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_partition_vectorized_read.q new file mode 100644 index 000000000000..506f69488712 --- /dev/null +++ b/iceberg/iceberg-handler/src/test/queries/positive/iceberg_partition_vectorized_read.q @@ -0,0 +1,24 @@ +set hive.vectorized.execution.enabled=true; + +CREATE EXTERNAL TABLE ice_date (`col1` int, `day` date, `calday` date) PARTITIONED BY SPEC (calday) stored by +iceberg tblproperties('format-version'='2'); +insert into ice_date values(1, '2020-11-20', '2020-11-20'), (1, '2020-11-20', '2020-11-20'); +select * from ice_date; +select count(calday) from ice_date; +select distinct(calday) from ice_date; + + +CREATE EXTERNAL TABLE ice_timestamp (`col1` int, `day` date, `times` timestamp) PARTITIONED BY SPEC (times) stored +by iceberg tblproperties('format-version'='2'); +insert into ice_timestamp values(1, '2020-11-20', '2020-11-20'), (1, '2020-11-20', '2020-11-20'); +select * from ice_timestamp; +select count(times) from ice_timestamp; +select distinct(times) from ice_timestamp; + + +CREATE EXTERNAL TABLE ice_decimal (`col1` int, `decimalA` decimal(5,2), `decimalC` decimal(5,2)) PARTITIONED BY SPEC +(decimalC) stored by iceberg tblproperties('format-version'='2'); +insert into ice_decimal values(1, 122.91, 102.21), (1, 12.32, 200.12); +select * from ice_decimal; +select distinct(decimalc) from ice_decimal; +select count(decimala) from ice_decimal where decimala=122.91; diff --git a/iceberg/iceberg-handler/src/test/queries/positive/merge_iceberg_copy_on_write_unpartitioned.q b/iceberg/iceberg-handler/src/test/queries/positive/merge_iceberg_copy_on_write_unpartitioned.q index 34ac6ffe9781..371e4b5e3129 100644 --- a/iceberg/iceberg-handler/src/test/queries/positive/merge_iceberg_copy_on_write_unpartitioned.q +++ b/iceberg/iceberg-handler/src/test/queries/positive/merge_iceberg_copy_on_write_unpartitioned.q @@ -11,6 +11,11 @@ insert into target_ice values (1, 'one', 50), (2, 'two', 51), (111, 'one', 55), insert into source values (1, 'one', 50), (2, 'two', 51), (3, 'three', 52), (4, 'four', 53), (5, 'five', 
54), (111, 'one', 55); -- merge +explain +merge into target_ice as t using source src ON t.a = src.a +when matched and t.a > 100 THEN DELETE +when not matched then insert values (src.a, src.b, src.c); + explain merge into target_ice as t using source src ON t.a = src.a when matched and t.a > 100 THEN DELETE diff --git a/iceberg/iceberg-handler/src/test/results/positive/ctas_iceberg_partitioned_orc.q.out b/iceberg/iceberg-handler/src/test/results/positive/ctas_iceberg_partitioned_orc.q.out index d0c2aef2bf19..f6a407b35bea 100644 --- a/iceberg/iceberg-handler/src/test/results/positive/ctas_iceberg_partitioned_orc.q.out +++ b/iceberg/iceberg-handler/src/test/results/positive/ctas_iceberg_partitioned_orc.q.out @@ -333,5 +333,5 @@ POSTHOOK: query: select * from tbl_ice POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl_ice POSTHOOK: Output: hdfs://### HDFS PATH ### -1 one 3 1 two 4 +1 one 3 diff --git a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_copy_on_write_partitioned.q.out b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_copy_on_write_partitioned.q.out index 6206f479aac6..333d5146d592 100644 --- a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_copy_on_write_partitioned.q.out +++ b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_copy_on_write_partitioned.q.out @@ -67,11 +67,11 @@ POSTHOOK: Input: _dummy_database@_dummy_table POSTHOOK: Output: default@tbl_ice Warning: Shuffle Join MERGEJOIN[61][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 4' is a cross product -Warning: Shuffle Join MERGEJOIN[224][tables = [$hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 3' is a cross product -Warning: Shuffle Join MERGEJOIN[222][tables = [$hdt$_2, $hdt$_3]] in Stage 'Reducer 9' is a cross product -Warning: Shuffle Join MERGEJOIN[226][tables = [$hdt$_2, $hdt$_3, $hdt$_4, $hdt$_5]] in Stage 'Reducer 11' is a cross product -Warning: Shuffle Join MERGEJOIN[233][tables = [$hdt$_1, $hdt$_2, $hdt$_0, $hdt$_3]] in Stage 'Reducer 6' is a cross product -Warning: Shuffle Join MERGEJOIN[220][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 14' is a cross product +Warning: Shuffle Join MERGEJOIN[225][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product +Warning: Shuffle Join MERGEJOIN[227][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 5' is a cross product +Warning: Shuffle Join MERGEJOIN[223][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 9' is a cross product +Warning: Shuffle Join MERGEJOIN[229][tables = [$hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 11' is a cross product +Warning: Shuffle Join MERGEJOIN[221][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 14' is a cross product Warning: Shuffle Join MERGEJOIN[231][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 16' is a cross product PREHOOK: query: explain delete from tbl_ice where a in (select a from tbl_ice where a <= 5) or c in (select c from tbl_ice where c > 800) PREHOOK: type: QUERY @@ -92,28 +92,29 @@ STAGE PLANS: Tez #### A masked pattern was here #### Edges: - Reducer 10 <- Reducer 20 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE) - Reducer 11 <- Reducer 10 (XPROD_EDGE), Reducer 22 (XPROD_EDGE) + Reducer 10 <- Reducer 19 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE) + Reducer 11 <- Reducer 10 (XPROD_EDGE), Reducer 24 (XPROD_EDGE) Reducer 12 <- Reducer 11 (SIMPLE_EDGE), Reducer 23 (SIMPLE_EDGE) Reducer 13 <- Reducer 12 
(SIMPLE_EDGE) - Reducer 14 <- Map 1 (XPROD_EDGE), Reducer 19 (XPROD_EDGE) - Reducer 15 <- Reducer 14 (SIMPLE_EDGE), Reducer 20 (SIMPLE_EDGE) - Reducer 16 <- Reducer 15 (XPROD_EDGE), Reducer 22 (XPROD_EDGE) + Reducer 14 <- Map 1 (XPROD_EDGE), Reducer 20 (XPROD_EDGE) + Reducer 15 <- Reducer 14 (SIMPLE_EDGE), Reducer 19 (SIMPLE_EDGE) + Reducer 16 <- Reducer 15 (XPROD_EDGE), Reducer 24 (XPROD_EDGE) Reducer 17 <- Reducer 16 (SIMPLE_EDGE), Reducer 23 (SIMPLE_EDGE) Reducer 18 <- Reducer 17 (SIMPLE_EDGE), Union 7 (CONTAINS) - Reducer 19 <- Map 1 (CUSTOM_SIMPLE_EDGE) - Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 20 (SIMPLE_EDGE) - Reducer 20 <- Map 1 (SIMPLE_EDGE) + Reducer 19 <- Map 1 (SIMPLE_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 19 (SIMPLE_EDGE) + Reducer 20 <- Map 1 (CUSTOM_SIMPLE_EDGE) Reducer 21 <- Map 1 (CUSTOM_SIMPLE_EDGE) Reducer 22 <- Map 1 (CUSTOM_SIMPLE_EDGE) Reducer 23 <- Map 1 (SIMPLE_EDGE) Reducer 24 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 25 <- Map 1 (CUSTOM_SIMPLE_EDGE) Reducer 3 <- Reducer 2 (XPROD_EDGE), Reducer 21 (XPROD_EDGE) - Reducer 4 <- Reducer 13 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE) - Reducer 5 <- Reducer 23 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE) - Reducer 6 <- Reducer 24 (XPROD_EDGE), Reducer 5 (XPROD_EDGE), Union 7 (CONTAINS) + Reducer 4 <- Reducer 23 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE) + Reducer 5 <- Reducer 25 (XPROD_EDGE), Reducer 4 (XPROD_EDGE) + Reducer 6 <- Reducer 13 (SIMPLE_EDGE), Reducer 5 (SIMPLE_EDGE), Union 7 (CONTAINS) Reducer 8 <- Union 7 (SIMPLE_EDGE) - Reducer 9 <- Map 1 (XPROD_EDGE), Reducer 19 (XPROD_EDGE) + Reducer 9 <- Map 1 (XPROD_EDGE), Reducer 22 (XPROD_EDGE) #### A masked pattern was here #### Vertices: Map 1 @@ -156,6 +157,18 @@ STAGE PLANS: Filter Operator predicate: (a <= 5) (type: boolean) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: a (type: int) + minReductionHashAggr: 0.4 + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Select Operator Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator @@ -169,18 +182,6 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) - Group By Operator - keys: a (type: int) - minReductionHashAggr: 0.4 - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(), count(a) minReductionHashAggr: 0.4 @@ -192,14 +193,11 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint), _col1 (type: bigint) - Filter Operator - predicate: (c > 800) (type: boolean) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 4 
Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() - minReductionHashAggr: 0.8333333 + minReductionHashAggr: 0.4 mode: hash outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -208,6 +206,9 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) + Filter Operator + predicate: (c > 800) (type: boolean) + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: c (type: int) minReductionHashAggr: 0.6666666 @@ -220,6 +221,19 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count() + minReductionHashAggr: 0.8333333 + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) Group By Operator aggregations: count(), count(c) minReductionHashAggr: 0.8333333 @@ -432,46 +446,13 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 3 Data size: 903 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 7 Data size: 2107 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Reducer 19 - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reducer 2 - Reduce Operator Tree: - Merge Join Operator - condition map: - Left Outer Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col8 - Statistics: Num rows: 6 Data size: 1822 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 6 Data size: 1822 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col8 (type: boolean) - Reducer 20 
Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -504,6 +485,34 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: boolean) + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Outer Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col8 + Statistics: Num rows: 6 Data size: 1822 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 6 Data size: 1822 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col8 (type: boolean) + Reducer 20 + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) Reducer 21 Execution mode: vectorized Reduce Operator Tree: @@ -530,11 +539,6 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) Reducer 23 Execution mode: vectorized Reduce Operator Tree: @@ -569,6 +573,24 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: boolean) Reducer 24 + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reducer 25 Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -595,38 +617,14 @@ STAGE PLANS: expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col9 (type: bigint), _col10 (type: bigint), _col8 (type: boolean) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10 Statistics: Num rows: 6 Data size: 1918 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: ((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) (type: boolean) + Reduce Output Operator + key expressions: _col2 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col2 (type: int) Statistics: Num rows: 6 Data size: 1918 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), 
_col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col5 (type: string) - null sort order: z - sort order: + - Map-reduce partition columns: _col5 (type: string) - Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col6 (type: bigint) + value expressions: _col0 (type: int), _col1 (type: string), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col10 (type: boolean) Reducer 4 - Reduce Operator Tree: - Merge Join Operator - condition map: - Left Semi Join 0 to 1 - keys: - 0 _col5 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 4 Data size: 1204 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col2 (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: _col2 (type: int) - Statistics: Num rows: 4 Data size: 1204 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: string), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) - Reducer 5 Reduce Operator Tree: Merge Join Operator condition map: @@ -634,14 +632,14 @@ STAGE PLANS: keys: 0 _col2 (type: int) 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col12 - Statistics: Num rows: 4 Data size: 1220 Basic stats: COMPLETE Column stats: COMPLETE + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col12 + Statistics: Num rows: 6 Data size: 1942 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 4 Data size: 1220 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col12 (type: boolean) - Reducer 6 + Statistics: Num rows: 6 Data size: 1942 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col10 (type: boolean), _col12 (type: boolean) + Reducer 5 Reduce Operator Tree: Merge Join Operator condition map: @@ -649,32 +647,53 @@ STAGE PLANS: keys: 0 1 - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col12, _col13, _col14 - Statistics: Num rows: 4 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col12, _col13, _col14 + Statistics: Num rows: 6 Data size: 2038 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col13 (type: bigint), _col14 (type: bigint), _col12 (type: boolean) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col11, 
_col12, _col14 - Statistics: Num rows: 4 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col10 (type: boolean), _col13 (type: bigint), _col14 (type: bigint), _col12 (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col12, _col14 + Statistics: Num rows: 6 Data size: 2038 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col11 = 0L) or (_col14 is null and (_col12 >= _col11) and _col2 is not null)) (type: boolean) - Statistics: Num rows: 4 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE + predicate: (((_col10 is not null and (_col7 <> 0L)) or ((_col0 is null or (_col8 < _col7)) and null and (_col7 <> 0L) and _col10 is null) or (_col14 is not null and (_col11 <> 0L)) or ((_col2 is null or (_col12 < _col11)) and null and (_col11 <> 0L) and _col14 is null)) is null or (((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) and ((_col11 = 0L) or (_col14 is null and (_col12 >= _col11) and _col2 is not null)))) (type: boolean) + Statistics: Num rows: 6 Data size: 2038 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col0 (type: int), _col1 (type: string), _col2 (type: int) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 4 Data size: 1204 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) - null sort order: aa - sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) - Statistics: Num rows: 7 Data size: 2107 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) + key expressions: _col5 (type: string) + null sort order: z + sort order: + + Map-reduce partition columns: _col5 (type: string) + Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col6 (type: bigint) + Reducer 6 + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Semi Join 0 to 1 + keys: + 0 _col5 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 4 Data size: 1204 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col0 (type: int), _col1 (type: string), _col2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 4 Data size: 1204 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: 
string) + null sort order: aa + sort order: ++ + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) + Statistics: Num rows: 7 Data size: 2107 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Reducer 8 Execution mode: vectorized Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY._col5 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), _col5 + expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY.iceberg_truncate(_col5, 3) (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), iceberg_truncate(_col5, 3) File Output Operator compressed: false Dp Sort State: PARTITION_SORTED @@ -723,11 +742,11 @@ STAGE PLANS: Warning: Shuffle Join MERGEJOIN[61][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 4' is a cross product -Warning: Shuffle Join MERGEJOIN[224][tables = [$hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 3' is a cross product -Warning: Shuffle Join MERGEJOIN[222][tables = [$hdt$_2, $hdt$_3]] in Stage 'Reducer 9' is a cross product -Warning: Shuffle Join MERGEJOIN[226][tables = [$hdt$_2, $hdt$_3, $hdt$_4, $hdt$_5]] in Stage 'Reducer 11' is a cross product -Warning: Shuffle Join MERGEJOIN[233][tables = [$hdt$_1, $hdt$_2, $hdt$_0, $hdt$_3]] in Stage 'Reducer 6' is a cross product -Warning: Shuffle Join MERGEJOIN[220][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 14' is a cross product +Warning: Shuffle Join MERGEJOIN[225][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product +Warning: Shuffle Join MERGEJOIN[227][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 5' is a cross product +Warning: Shuffle Join MERGEJOIN[223][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 9' is a cross product +Warning: Shuffle Join MERGEJOIN[229][tables = [$hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 11' is a cross product +Warning: Shuffle Join MERGEJOIN[221][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 14' is a cross product Warning: Shuffle Join MERGEJOIN[231][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 16' is a cross product PREHOOK: query: delete from tbl_ice where a in (select a from tbl_ice where a <= 5) or c in (select c from tbl_ice where c > 800) PREHOOK: type: QUERY @@ -1006,7 +1025,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10 Statistics: Num rows: 2 Data size: 642 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) (type: boolean) + predicate: ((_col7 = 0L) or ((_col10 is not null and (_col7 <> 0L)) or ((_col0 is null or (_col8 < _col7)) and null and (_col7 <> 0L) and _col10 is null)) is null or (_col10 is null 
and (_col8 >= _col7) and _col0 is not null)) (type: boolean) Statistics: Num rows: 2 Data size: 642 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) @@ -1034,18 +1053,18 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 2 Data size: 602 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Reducer 6 Execution mode: vectorized Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY._col5 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), _col5 + expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY.iceberg_truncate(_col5, 3) (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), iceberg_truncate(_col5, 3) File Output Operator compressed: false Dp Sort State: PARTITION_SORTED @@ -1106,10 +1125,10 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 2 Data size: 602 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Reducer 9 @@ -1458,7 +1477,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10 Statistics: Num rows: 1 Data size: 321 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) (type: boolean) + predicate: ((_col7 = 0L) or ((_col10 is not null and (_col7 <> 0L)) or ((_col0 is null or (_col8 < _col7)) and null and (_col7 <> 0L) and _col10 is null)) is null or (_col10 is null and (_col8 >= _col7) 
and _col0 is not null)) (type: boolean) Statistics: Num rows: 1 Data size: 321 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) @@ -1486,18 +1505,18 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 2 Data size: 602 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Reducer 6 Execution mode: vectorized Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY._col5 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), _col5 + expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY.iceberg_truncate(_col5, 3) (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), iceberg_truncate(_col5, 3) File Output Operator compressed: false Dp Sort State: PARTITION_SORTED @@ -1558,10 +1577,10 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 2 Data size: 602 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Reducer 9 diff --git a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_copy_on_write_unpartitioned.q.out b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_copy_on_write_unpartitioned.q.out index 22256bab0106..061d697e03f9 100644 --- a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_copy_on_write_unpartitioned.q.out +++ b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_copy_on_write_unpartitioned.q.out @@ -48,10 +48,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tbl_ice - filterExpr: (((a 
<> 22) and (b <> 'one') and (b <> 'four')) or (b) IN ('one', 'four') or (a = 22)) (type: boolean) + filterExpr: (((b <> 'one') and (b <> 'four') and (a <> 22)) or ((a = 22) or (b) IN ('one', 'four')) is null or (b) IN ('one', 'four') or (a = 22)) (type: boolean) Statistics: Num rows: 7 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((a <> 22) and (b <> 'one') and (b <> 'four') and FILE__PATH is not null) (type: boolean) + predicate: ((((b <> 'one') and (b <> 'four') and (a <> 22)) or ((a = 22) or (b) IN ('one', 'four')) is null) and FILE__PATH is not null) (type: boolean) Statistics: Num rows: 7 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int), b (type: string), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: bigint) @@ -250,11 +250,11 @@ POSTHOOK: Input: _dummy_database@_dummy_table POSTHOOK: Output: default@tbl_ice Warning: Shuffle Join MERGEJOIN[61][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 4' is a cross product -Warning: Shuffle Join MERGEJOIN[222][tables = [$hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 3' is a cross product -Warning: Shuffle Join MERGEJOIN[220][tables = [$hdt$_2, $hdt$_3]] in Stage 'Reducer 8' is a cross product -Warning: Shuffle Join MERGEJOIN[224][tables = [$hdt$_2, $hdt$_3, $hdt$_4, $hdt$_5]] in Stage 'Reducer 10' is a cross product -Warning: Shuffle Join MERGEJOIN[231][tables = [$hdt$_1, $hdt$_2, $hdt$_0, $hdt$_3]] in Stage 'Reducer 6' is a cross product -Warning: Shuffle Join MERGEJOIN[218][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 13' is a cross product +Warning: Shuffle Join MERGEJOIN[223][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product +Warning: Shuffle Join MERGEJOIN[225][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 5' is a cross product +Warning: Shuffle Join MERGEJOIN[221][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 8' is a cross product +Warning: Shuffle Join MERGEJOIN[227][tables = [$hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 10' is a cross product +Warning: Shuffle Join MERGEJOIN[219][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 13' is a cross product Warning: Shuffle Join MERGEJOIN[229][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 15' is a cross product PREHOOK: query: explain delete from tbl_ice where a in (select a from tbl_ice where a <= 5) or c in (select c from tbl_ice where c > 800) PREHOOK: type: QUERY @@ -275,27 +275,28 @@ STAGE PLANS: Tez #### A masked pattern was here #### Edges: - Reducer 10 <- Reducer 21 (XPROD_EDGE), Reducer 9 (XPROD_EDGE) + Reducer 10 <- Reducer 23 (XPROD_EDGE), Reducer 9 (XPROD_EDGE) Reducer 11 <- Reducer 10 (SIMPLE_EDGE), Reducer 22 (SIMPLE_EDGE) Reducer 12 <- Reducer 11 (SIMPLE_EDGE) - Reducer 13 <- Map 1 (XPROD_EDGE), Reducer 18 (XPROD_EDGE) - Reducer 14 <- Reducer 13 (SIMPLE_EDGE), Reducer 19 (SIMPLE_EDGE) - Reducer 15 <- Reducer 14 (XPROD_EDGE), Reducer 21 (XPROD_EDGE) + Reducer 13 <- Map 1 (XPROD_EDGE), Reducer 19 (XPROD_EDGE) + Reducer 14 <- Reducer 13 (SIMPLE_EDGE), Reducer 18 (SIMPLE_EDGE) + Reducer 15 <- Reducer 14 (XPROD_EDGE), Reducer 23 (XPROD_EDGE) Reducer 16 <- Reducer 15 (SIMPLE_EDGE), Reducer 22 (SIMPLE_EDGE) Reducer 17 <- Reducer 16 (SIMPLE_EDGE), Union 7 (CONTAINS) - Reducer 18 <- Map 1 (CUSTOM_SIMPLE_EDGE) - Reducer 19 <- 
Map 1 (SIMPLE_EDGE) - Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 19 (SIMPLE_EDGE) + Reducer 18 <- Map 1 (SIMPLE_EDGE) + Reducer 19 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 18 (SIMPLE_EDGE) Reducer 20 <- Map 1 (CUSTOM_SIMPLE_EDGE) Reducer 21 <- Map 1 (CUSTOM_SIMPLE_EDGE) Reducer 22 <- Map 1 (SIMPLE_EDGE) Reducer 23 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 24 <- Map 1 (CUSTOM_SIMPLE_EDGE) Reducer 3 <- Reducer 2 (XPROD_EDGE), Reducer 20 (XPROD_EDGE) - Reducer 4 <- Reducer 12 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE) - Reducer 5 <- Reducer 22 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE) - Reducer 6 <- Reducer 23 (XPROD_EDGE), Reducer 5 (XPROD_EDGE), Union 7 (CONTAINS) - Reducer 8 <- Map 1 (XPROD_EDGE), Reducer 18 (XPROD_EDGE) - Reducer 9 <- Reducer 19 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE) + Reducer 4 <- Reducer 22 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE) + Reducer 5 <- Reducer 24 (XPROD_EDGE), Reducer 4 (XPROD_EDGE) + Reducer 6 <- Reducer 12 (SIMPLE_EDGE), Reducer 5 (SIMPLE_EDGE), Union 7 (CONTAINS) + Reducer 8 <- Map 1 (XPROD_EDGE), Reducer 21 (XPROD_EDGE) + Reducer 9 <- Reducer 18 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE) #### A masked pattern was here #### Vertices: Map 1 @@ -338,6 +339,18 @@ STAGE PLANS: Filter Operator predicate: (a <= 5) (type: boolean) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: a (type: int) + minReductionHashAggr: 0.4 + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Select Operator Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator @@ -351,18 +364,6 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) - Group By Operator - keys: a (type: int) - minReductionHashAggr: 0.4 - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count(), count(a) minReductionHashAggr: 0.4 @@ -374,14 +375,11 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint), _col1 (type: bigint) - Filter Operator - predicate: (c > 800) (type: boolean) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() - minReductionHashAggr: 0.8333333 + minReductionHashAggr: 0.4 mode: hash outputColumnNames: _col0 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -390,6 +388,9 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) + Filter Operator + predicate: (c > 800) (type: boolean) + 
Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator keys: c (type: int) minReductionHashAggr: 0.6666666 @@ -402,6 +403,19 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count() + minReductionHashAggr: 0.8333333 + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) Group By Operator aggregations: count(), count(c) minReductionHashAggr: 0.8333333 @@ -607,24 +621,6 @@ STAGE PLANS: serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe name: default.tbl_ice Reducer 18 - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reducer 19 Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -657,6 +653,19 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: boolean) + Reducer 19 + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) Reducer 2 Reduce Operator Tree: Merge Join Operator @@ -698,11 +707,6 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) Reducer 22 Execution mode: vectorized Reduce Operator Tree: @@ -737,6 +741,24 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: boolean) Reducer 23 + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + 
value expressions: _col0 (type: bigint) + Reducer 24 Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -763,38 +785,14 @@ STAGE PLANS: expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col9 (type: bigint), _col10 (type: bigint), _col8 (type: boolean) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10 Statistics: Num rows: 6 Data size: 1918 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: ((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) (type: boolean) + Reduce Output Operator + key expressions: _col2 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col2 (type: int) Statistics: Num rows: 6 Data size: 1918 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col5 (type: string) - null sort order: z - sort order: + - Map-reduce partition columns: _col5 (type: string) - Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col6 (type: bigint) + value expressions: _col0 (type: int), _col1 (type: string), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col10 (type: boolean) Reducer 4 - Reduce Operator Tree: - Merge Join Operator - condition map: - Left Semi Join 0 to 1 - keys: - 0 _col5 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 4 Data size: 1204 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col2 (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: _col2 (type: int) - Statistics: Num rows: 4 Data size: 1204 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: string), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) - Reducer 5 Reduce Operator Tree: Merge Join Operator condition map: @@ -802,14 +800,14 @@ STAGE PLANS: keys: 0 _col2 (type: int) 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col12 - Statistics: Num rows: 4 Data size: 1220 Basic stats: COMPLETE Column stats: COMPLETE + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col12 + Statistics: Num rows: 6 Data size: 1942 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 4 Data size: 1220 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col12 (type: boolean) - Reducer 6 + Statistics: Num rows: 6 Data size: 1942 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 
(type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col10 (type: boolean), _col12 (type: boolean) + Reducer 5 Reduce Operator Tree: Merge Join Operator condition map: @@ -817,27 +815,48 @@ STAGE PLANS: keys: 0 1 - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col12, _col13, _col14 - Statistics: Num rows: 4 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col12, _col13, _col14 + Statistics: Num rows: 6 Data size: 2038 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col13 (type: bigint), _col14 (type: bigint), _col12 (type: boolean) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col11, _col12, _col14 - Statistics: Num rows: 4 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col10 (type: boolean), _col13 (type: bigint), _col14 (type: bigint), _col12 (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col12, _col14 + Statistics: Num rows: 6 Data size: 2038 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col11 = 0L) or (_col14 is null and (_col12 >= _col11) and _col2 is not null)) (type: boolean) - Statistics: Num rows: 4 Data size: 1284 Basic stats: COMPLETE Column stats: COMPLETE + predicate: (((_col10 is not null and (_col7 <> 0L)) or ((_col0 is null or (_col8 < _col7)) and null and (_col7 <> 0L) and _col10 is null) or (_col14 is not null and (_col11 <> 0L)) or ((_col2 is null or (_col12 < _col11)) and null and (_col11 <> 0L) and _col14 is null)) is null or (((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) and ((_col11 = 0L) or (_col14 is null and (_col12 >= _col11) and _col2 is not null)))) (type: boolean) + Statistics: Num rows: 6 Data size: 2038 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col0 (type: int), _col1 (type: string), _col2 (type: int) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 4 Data size: 1204 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 7 Data size: 2107 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.iceberg.mr.hive.HiveIcebergInputFormat - output format: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat - serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe - name: default.tbl_ice + Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col5 (type: string) + null sort order: z + sort order: + + Map-reduce partition columns: _col5 (type: string) + Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 
(type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col6 (type: bigint) + Reducer 6 + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Semi Join 0 to 1 + keys: + 0 _col5 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 4 Data size: 1204 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col0 (type: int), _col1 (type: string), _col2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 4 Data size: 1204 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 7 Data size: 2107 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.iceberg.mr.hive.HiveIcebergInputFormat + output format: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat + serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe + name: default.tbl_ice Reducer 8 Reduce Operator Tree: Merge Join Operator @@ -892,11 +911,11 @@ STAGE PLANS: Warning: Shuffle Join MERGEJOIN[61][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product Warning: Shuffle Join MERGEJOIN[63][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 4' is a cross product -Warning: Shuffle Join MERGEJOIN[222][tables = [$hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 3' is a cross product -Warning: Shuffle Join MERGEJOIN[220][tables = [$hdt$_2, $hdt$_3]] in Stage 'Reducer 8' is a cross product -Warning: Shuffle Join MERGEJOIN[224][tables = [$hdt$_2, $hdt$_3, $hdt$_4, $hdt$_5]] in Stage 'Reducer 10' is a cross product -Warning: Shuffle Join MERGEJOIN[231][tables = [$hdt$_1, $hdt$_2, $hdt$_0, $hdt$_3]] in Stage 'Reducer 6' is a cross product -Warning: Shuffle Join MERGEJOIN[218][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 13' is a cross product +Warning: Shuffle Join MERGEJOIN[223][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product +Warning: Shuffle Join MERGEJOIN[225][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 5' is a cross product +Warning: Shuffle Join MERGEJOIN[221][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 8' is a cross product +Warning: Shuffle Join MERGEJOIN[227][tables = [$hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 10' is a cross product +Warning: Shuffle Join MERGEJOIN[219][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 13' is a cross product Warning: Shuffle Join MERGEJOIN[229][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 15' is a cross product PREHOOK: query: delete from tbl_ice where a in (select a from tbl_ice where a <= 5) or c in (select c from tbl_ice where c > 800) PREHOOK: type: QUERY @@ -1161,7 +1180,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10 Statistics: Num rows: 2 Data size: 642 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) (type: boolean) + predicate: ((_col7 = 0L) or ((_col10 is not null and (_col7 <> 0L)) or ((_col0 is null or (_col8 < _col7)) and null and (_col7 <> 0L) and _col10 is null)) is null or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) (type: boolean) Statistics: Num rows: 2 Data size: 642 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 
(type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) @@ -1593,7 +1612,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10 Statistics: Num rows: 1 Data size: 321 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) (type: boolean) + predicate: ((_col7 = 0L) or ((_col10 is not null and (_col7 <> 0L)) or ((_col0 is null or (_col8 < _col7)) and null and (_col7 <> 0L) and _col10 is null)) is null or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) (type: boolean) Statistics: Num rows: 1 Data size: 321 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) @@ -1810,3 +1829,64 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl_ice POSTHOOK: Output: hdfs://### HDFS PATH ### 0 +PREHOOK: query: drop table if exists tbl_ice_with_nulls +PREHOOK: type: DROPTABLE +PREHOOK: Output: database:default +POSTHOOK: query: drop table if exists tbl_ice_with_nulls +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: database:default +PREHOOK: query: create table tbl_ice_with_nulls (id int, name string) stored by iceberg tblproperties('format-version'='2', 'write.delete.mode'='copy-on-write') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl_ice_with_nulls +POSTHOOK: query: create table tbl_ice_with_nulls (id int, name string) stored by iceberg tblproperties('format-version'='2', 'write.delete.mode'='copy-on-write') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl_ice_with_nulls +PREHOOK: query: insert into tbl_ice_with_nulls values +(1, 'ABC'),(2, 'CBS'),(3, null),(4, 'POPI'),(5, 'AQWR'),(6, 'POIU'),(7, 'SDF'),(9, null),(8,'POIKL'),(10, 'YUIO') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tbl_ice_with_nulls +POSTHOOK: query: insert into tbl_ice_with_nulls values +(1, 'ABC'),(2, 'CBS'),(3, null),(4, 'POPI'),(5, 'AQWR'),(6, 'POIU'),(7, 'SDF'),(9, null),(8,'POIKL'),(10, 'YUIO') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tbl_ice_with_nulls +Warning: Shuffle Join MERGEJOIN[57][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product +Warning: Shuffle Join MERGEJOIN[59][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 4' is a cross product +Warning: Shuffle Join MERGEJOIN[60][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 5' is a cross product +Warning: Shuffle Join MERGEJOIN[211][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 3' is a cross product +Warning: Shuffle Join MERGEJOIN[213][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 4' is a cross product +Warning: Shuffle Join MERGEJOIN[214][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 5' is a cross product +Warning: Shuffle Join MERGEJOIN[209][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 8' is a cross product +Warning: Shuffle Join MERGEJOIN[215][tables = [$hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 10' is a cross product +Warning: Shuffle Join MERGEJOIN[216][tables = [$hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4, $hdt$_5]] in Stage 'Reducer 11' is a cross product +Warning: Shuffle Join 
MERGEJOIN[207][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 13' is a cross product +Warning: Shuffle Join MERGEJOIN[217][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 15' is a cross product +Warning: Shuffle Join MERGEJOIN[218][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 16' is a cross product +PREHOOK: query: delete from tbl_ice_with_nulls where id in (select id from tbl_ice_with_nulls where id > 9) or name in (select name from tbl_ice_with_nulls where name = 'sdf') +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_ice_with_nulls +PREHOOK: Output: default@tbl_ice_with_nulls +POSTHOOK: query: delete from tbl_ice_with_nulls where id in (select id from tbl_ice_with_nulls where id > 9) or name in (select name from tbl_ice_with_nulls where name = 'sdf') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_ice_with_nulls +POSTHOOK: Output: default@tbl_ice_with_nulls +PREHOOK: query: select * from tbl_ice_with_nulls order by id +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_ice_with_nulls +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from tbl_ice_with_nulls order by id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_ice_with_nulls +POSTHOOK: Output: hdfs://### HDFS PATH ### +1 ABC +2 CBS +3 NULL +4 POPI +5 AQWR +6 POIU +7 SDF +8 POIKL +9 NULL diff --git a/iceberg/iceberg-handler/src/test/results/positive/iceberg_copy_on_write.q.out b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_mixed.q.out similarity index 87% rename from iceberg/iceberg-handler/src/test/results/positive/iceberg_copy_on_write.q.out rename to iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_mixed.q.out index 4d80795bc156..4599a458520a 100644 --- a/iceberg/iceberg-handler/src/test/results/positive/iceberg_copy_on_write.q.out +++ b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_mixed.q.out @@ -84,17 +84,17 @@ Stage-4 <-Reducer 2 [CONTAINS] File Output Operator [FS_46] table:{"name:":"default.ice01"} - Select Operator [SEL_44] (rows=2 width=295) + Select Operator [SEL_44] (rows=3 width=295) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] - Merge Join Operator [MERGEJOIN_43] (rows=2 width=295) + Merge Join Operator [MERGEJOIN_43] (rows=3 width=295) Conds:RS_57._col4=RS_63._col0(Left Semi),Output:["_col0","_col1","_col2","_col3","_col4","_col5"] <-Map 1 [SIMPLE_EDGE] vectorized SHUFFLE [RS_57] PartitionCols:_col4 - Select Operator [SEL_55] (rows=2 width=295) + Select Operator [SEL_55] (rows=5 width=295) Output:["_col0","_col1","_col2","_col3","_col4","_col5"] - Filter Operator [FIL_53] (rows=2 width=91) - predicate:((id <= 4) and (id <> 2) and FILE__PATH is not null) + Filter Operator [FIL_53] (rows=5 width=91) + predicate:((((id <= 4) and (id <> 2)) or ((id > 4) or (id = 2)) is null) and ((id <= 4) or (id <> 2) or ((id > 4) or (id = 2)) is null) and FILE__PATH is not null) TableScan [TS_0] (rows=7 width=78) default@ice01,ice01,Tbl:COMPLETE,Col:COMPLETE,Output:["id","name"] <-Reducer 4 [SIMPLE_EDGE] vectorized @@ -165,6 +165,35 @@ POSTHOOK: Output: hdfs://### HDFS PATH ### {"added-data-files":"1","added-records":"9","added-files-size":"#FileSize#","changed-partition-count":"1","total-records":"9","total-files-size":"#FileSize#","total-data-files":"1","total-delete-files":"0","total-position-deletes":"0","total-equality-deletes":"0"} 
{"added-position-delete-files":"1","added-delete-files":"1","added-files-size":"#FileSize#","added-position-deletes":"2","changed-partition-count":"1","total-records":"9","total-files-size":"#FileSize#","total-data-files":"1","total-delete-files":"1","total-position-deletes":"2","total-equality-deletes":"0"} {"added-data-files":"1","deleted-data-files":"1","added-records":"3","deleted-records":"9","added-files-size":"#FileSize#","removed-files-size":"#FileSize#","changed-partition-count":"1","total-records":"3","total-files-size":"#FileSize#","total-data-files":"1","total-delete-files":"1","total-position-deletes":"2","total-equality-deletes":"0"} +PREHOOK: query: delete from ice01 where null +PREHOOK: type: QUERY +PREHOOK: Input: default@ice01 +PREHOOK: Output: default@ice01 +POSTHOOK: query: delete from ice01 where null +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice01 +POSTHOOK: Output: default@ice01 +PREHOOK: query: select * from ice01 +PREHOOK: type: QUERY +PREHOOK: Input: default@ice01 +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from ice01 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice01 +POSTHOOK: Output: hdfs://### HDFS PATH ### +1 ABC +3 NULL +4 POPI +PREHOOK: query: delete from ice01 where not null +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Input: default@ice01 +PREHOOK: Output: default@ice01 +POSTHOOK: query: delete from ice01 where not null +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Input: default@ice01 +POSTHOOK: Output: default@ice01 PREHOOK: query: select * from ice01 PREHOOK: type: QUERY PREHOOK: Input: default@ice01 @@ -176,12 +205,12 @@ POSTHOOK: Output: hdfs://### HDFS PATH ### 1 ABC 3 NULL 4 POPI -PREHOOK: query: delete from ice01 where name=null +PREHOOK: query: delete from ice01 where name = null PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table PREHOOK: Input: default@ice01 PREHOOK: Output: default@ice01 -POSTHOOK: query: delete from ice01 where name=null +POSTHOOK: query: delete from ice01 where name = null POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table POSTHOOK: Input: default@ice01 @@ -197,12 +226,12 @@ POSTHOOK: Output: hdfs://### HDFS PATH ### 1 ABC 3 NULL 4 POPI -PREHOOK: query: delete from ice01 where name!=null +PREHOOK: query: delete from ice01 where name != null PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table PREHOOK: Input: default@ice01 PREHOOK: Output: default@ice01 -POSTHOOK: query: delete from ice01 where name!=null +POSTHOOK: query: delete from ice01 where name != null POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table POSTHOOK: Input: default@ice01 diff --git a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_unpartitioned_parquet.q.out b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_unpartitioned_parquet.q.out index 3672e210f2e1..85b3b2a7b56d 100644 --- a/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_unpartitioned_parquet.q.out +++ b/iceberg/iceberg-handler/src/test/results/positive/delete_iceberg_unpartitioned_parquet.q.out @@ -154,3 +154,58 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl_ice POSTHOOK: Output: hdfs://### HDFS PATH ### 0 +PREHOOK: query: drop table if exists tbl_ice_with_nulls +PREHOOK: type: DROPTABLE +PREHOOK: Output: database:default +POSTHOOK: query: drop table if exists tbl_ice_with_nulls +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: database:default +PREHOOK: query: create table 
tbl_ice_with_nulls (id int, name string) stored by iceberg tblproperties('format-version'='2') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl_ice_with_nulls +POSTHOOK: query: create table tbl_ice_with_nulls (id int, name string) stored by iceberg tblproperties('format-version'='2') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl_ice_with_nulls +PREHOOK: query: insert into tbl_ice_with_nulls values +(1, 'ABC'),(2, 'CBS'),(3, null),(4, 'POPI'),(5, 'AQWR'),(6, 'POIU'),(7, 'SDF'),(9, null),(8,'POIKL'),(10, 'YUIO') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tbl_ice_with_nulls +POSTHOOK: query: insert into tbl_ice_with_nulls values +(1, 'ABC'),(2, 'CBS'),(3, null),(4, 'POPI'),(5, 'AQWR'),(6, 'POIU'),(7, 'SDF'),(9, null),(8,'POIKL'),(10, 'YUIO') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tbl_ice_with_nulls +Warning: Shuffle Join MERGEJOIN[57][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product +Warning: Shuffle Join MERGEJOIN[59][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 4' is a cross product +Warning: Shuffle Join MERGEJOIN[60][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 5' is a cross product +Warning: Shuffle Join MERGEJOIN[59][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product +Warning: Shuffle Join MERGEJOIN[61][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 4' is a cross product +Warning: Shuffle Join MERGEJOIN[62][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 5' is a cross product +PREHOOK: query: delete from tbl_ice_with_nulls where id in (select id from tbl_ice_with_nulls where id > 9) or name in (select name from tbl_ice_with_nulls where name = 'sdf') +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_ice_with_nulls +PREHOOK: Output: default@tbl_ice_with_nulls +POSTHOOK: query: delete from tbl_ice_with_nulls where id in (select id from tbl_ice_with_nulls where id > 9) or name in (select name from tbl_ice_with_nulls where name = 'sdf') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_ice_with_nulls +POSTHOOK: Output: default@tbl_ice_with_nulls +PREHOOK: query: select * from tbl_ice_with_nulls order by id +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_ice_with_nulls +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from tbl_ice_with_nulls order by id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_ice_with_nulls +POSTHOOK: Output: hdfs://### HDFS PATH ### +1 ABC +2 CBS +3 NULL +4 POPI +5 AQWR +6 POIU +7 SDF +8 POIKL +9 NULL diff --git a/iceberg/iceberg-handler/src/test/results/positive/dynamic_partition_writes.q.out b/iceberg/iceberg-handler/src/test/results/positive/dynamic_partition_writes.q.out index a05ebf9af733..b7690c5579ff 100644 --- a/iceberg/iceberg-handler/src/test/results/positive/dynamic_partition_writes.q.out +++ b/iceberg/iceberg-handler/src/test/results/positive/dynamic_partition_writes.q.out @@ -28,6 +28,66 @@ PREHOOK: Output: database:default POSTHOOK: query: drop table if exists tbl_bucket_date POSTHOOK: type: DROPTABLE POSTHOOK: Output: database:default +PREHOOK: query: drop table if exists tbl_target_truncate_str +PREHOOK: type: DROPTABLE +PREHOOK: Output: database:default +POSTHOOK: query: drop table if exists tbl_target_truncate_str +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: database:default +PREHOOK: 
query: drop table if exists tbl_target_truncate_int +PREHOOK: type: DROPTABLE +PREHOOK: Output: database:default +POSTHOOK: query: drop table if exists tbl_target_truncate_int +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: database:default +PREHOOK: query: drop table if exists tbl_target_truncate_bigint +PREHOOK: type: DROPTABLE +PREHOOK: Output: database:default +POSTHOOK: query: drop table if exists tbl_target_truncate_bigint +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: database:default +PREHOOK: query: drop table if exists tbl_year_date +PREHOOK: type: DROPTABLE +PREHOOK: Output: database:default +POSTHOOK: query: drop table if exists tbl_year_date +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: database:default +PREHOOK: query: drop table if exists tbl_year_timestamp +PREHOOK: type: DROPTABLE +PREHOOK: Output: database:default +POSTHOOK: query: drop table if exists tbl_year_timestamp +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: database:default +PREHOOK: query: drop table if exists tbl_month_date +PREHOOK: type: DROPTABLE +PREHOOK: Output: database:default +POSTHOOK: query: drop table if exists tbl_month_date +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: database:default +PREHOOK: query: drop table if exists tbl_month_timestamp +PREHOOK: type: DROPTABLE +PREHOOK: Output: database:default +POSTHOOK: query: drop table if exists tbl_month_timestamp +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: database:default +PREHOOK: query: drop table if exists tbl_day_date +PREHOOK: type: DROPTABLE +PREHOOK: Output: database:default +POSTHOOK: query: drop table if exists tbl_day_date +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: database:default +PREHOOK: query: drop table if exists tbl_day_timestamp +PREHOOK: type: DROPTABLE +PREHOOK: Output: database:default +POSTHOOK: query: drop table if exists tbl_day_timestamp +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: database:default +PREHOOK: query: drop table if exists tbl_hour_timestamp +PREHOOK: type: DROPTABLE +PREHOOK: Output: database:default +POSTHOOK: query: drop table if exists tbl_hour_timestamp +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: database:default PREHOOK: query: create external table tbl_src (a int, b string, c bigint) stored by iceberg stored as orc PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -606,3 +666,974 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@tbl_bucket_date POSTHOOK: Output: hdfs://### HDFS PATH ### 1 +PREHOOK: query: create external table tbl_target_truncate_str (a int, ccy string) partitioned by spec (truncate(2, ccy)) stored by iceberg stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl_target_truncate_str +POSTHOOK: query: create external table tbl_target_truncate_str (a int, ccy string) partitioned by spec (truncate(2, ccy)) stored by iceberg stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl_target_truncate_str +PREHOOK: query: explain insert into table tbl_target_truncate_str select a, b from tbl_src +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_src +PREHOOK: Output: default@tbl_target_truncate_str +POSTHOOK: query: explain insert into table tbl_target_truncate_str select a, b from tbl_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_src +POSTHOOK: Output: default@tbl_target_truncate_str +Plan optimized by CBO. 
+ +Vertex dependency in root stage +Reducer 2 <- Map 1 (SIMPLE_EDGE) +Reducer 3 <- Map 1 (CUSTOM_SIMPLE_EDGE) + +Stage-3 + Stats Work{} + Stage-0 + Move Operator + table:{"name:":"default.tbl_target_truncate_str"} + Stage-2 + Dependency Collection{} + Stage-1 + Reducer 2 vectorized + File Output Operator [FS_18] + table:{"name:":"default.tbl_target_truncate_str"} + Select Operator [SEL_17] + Output:["_col0","_col1","iceberg_truncate(_col1, 2)"] + <-Map 1 [SIMPLE_EDGE] vectorized + PARTITION_ONLY_SHUFFLE [RS_13] + PartitionCols:iceberg_truncate(_col1, 2) + Select Operator [SEL_12] (rows=22 width=87) + Output:["_col0","_col1"] + TableScan [TS_0] (rows=22 width=87) + default@tbl_src,tbl_src,Tbl:COMPLETE,Col:COMPLETE,Output:["a","b"] + Reducer 3 vectorized + File Output Operator [FS_21] + Select Operator [SEL_20] (rows=1 width=530) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"] + Group By Operator [GBY_19] (rows=1 width=332) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","count(VALUE._col2)","count(VALUE._col3)","compute_bit_vector_hll(VALUE._col4)","max(VALUE._col5)","avg(VALUE._col6)","count(VALUE._col7)","compute_bit_vector_hll(VALUE._col8)"] + <-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized + PARTITION_ONLY_SHUFFLE [RS_16] + Group By Operator [GBY_15] (rows=1 width=400) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"],aggregations:["min(a)","max(a)","count(1)","count(a)","compute_bit_vector_hll(a)","max(length(ccy))","avg(COALESCE(length(ccy),0))","count(ccy)","compute_bit_vector_hll(ccy)"] + Select Operator [SEL_14] (rows=22 width=87) + Output:["a","ccy"] + Please refer to the previous Select Operator [SEL_12] + +PREHOOK: query: insert into table tbl_target_truncate_str select a, b from tbl_src +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_src +PREHOOK: Output: default@tbl_target_truncate_str +POSTHOOK: query: insert into table tbl_target_truncate_str select a, b from tbl_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_src +POSTHOOK: Output: default@tbl_target_truncate_str +PREHOOK: query: select * from tbl_target_truncate_str order by a, ccy +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_target_truncate_str +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from tbl_target_truncate_str order by a, ccy +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_target_truncate_str +POSTHOOK: Output: hdfs://### HDFS PATH ### +1 EUR +2 EUR +3 USD +4 EUR +5 HUF +6 USD +7 USD +8 PLN +9 PLN +10 CZK +10 EUR +12 NULL +20 EUR +30 USD +40 EUR +50 HUF +60 USD +70 USD +80 PLN +90 PLN +100 CZK +110 NULL +PREHOOK: query: create external table tbl_target_truncate_int (id int, ccy string) partitioned by spec (truncate(2, id)) stored by iceberg stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl_target_truncate_int +POSTHOOK: query: create external table tbl_target_truncate_int (id int, ccy string) partitioned by spec (truncate(2, id)) stored by iceberg stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl_target_truncate_int +PREHOOK: query: explain insert into table tbl_target_truncate_int select a, b from tbl_src +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_src +PREHOOK: Output: default@tbl_target_truncate_int +POSTHOOK: query: explain insert into table tbl_target_truncate_int select 
a, b from tbl_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_src +POSTHOOK: Output: default@tbl_target_truncate_int +Plan optimized by CBO. + +Vertex dependency in root stage +Reducer 2 <- Map 1 (SIMPLE_EDGE) +Reducer 3 <- Map 1 (CUSTOM_SIMPLE_EDGE) + +Stage-3 + Stats Work{} + Stage-0 + Move Operator + table:{"name:":"default.tbl_target_truncate_int"} + Stage-2 + Dependency Collection{} + Stage-1 + Reducer 2 vectorized + File Output Operator [FS_18] + table:{"name:":"default.tbl_target_truncate_int"} + Select Operator [SEL_17] + Output:["_col0","_col1","iceberg_truncate(_col0, 2)"] + <-Map 1 [SIMPLE_EDGE] vectorized + PARTITION_ONLY_SHUFFLE [RS_13] + PartitionCols:iceberg_truncate(_col0, 2) + Select Operator [SEL_12] (rows=22 width=87) + Output:["_col0","_col1"] + TableScan [TS_0] (rows=22 width=87) + default@tbl_src,tbl_src,Tbl:COMPLETE,Col:COMPLETE,Output:["a","b"] + Reducer 3 vectorized + File Output Operator [FS_21] + Select Operator [SEL_20] (rows=1 width=530) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"] + Group By Operator [GBY_19] (rows=1 width=332) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","count(VALUE._col2)","count(VALUE._col3)","compute_bit_vector_hll(VALUE._col4)","max(VALUE._col5)","avg(VALUE._col6)","count(VALUE._col7)","compute_bit_vector_hll(VALUE._col8)"] + <-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized + PARTITION_ONLY_SHUFFLE [RS_16] + Group By Operator [GBY_15] (rows=1 width=400) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"],aggregations:["min(id)","max(id)","count(1)","count(id)","compute_bit_vector_hll(id)","max(length(ccy))","avg(COALESCE(length(ccy),0))","count(ccy)","compute_bit_vector_hll(ccy)"] + Select Operator [SEL_14] (rows=22 width=87) + Output:["id","ccy"] + Please refer to the previous Select Operator [SEL_12] + +PREHOOK: query: insert into table tbl_target_truncate_int select a, b from tbl_src +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_src +PREHOOK: Output: default@tbl_target_truncate_int +POSTHOOK: query: insert into table tbl_target_truncate_int select a, b from tbl_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_src +POSTHOOK: Output: default@tbl_target_truncate_int +PREHOOK: query: select * from tbl_target_truncate_int order by id, ccy +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_target_truncate_int +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from tbl_target_truncate_int order by id, ccy +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_target_truncate_int +POSTHOOK: Output: hdfs://### HDFS PATH ### +1 EUR +2 EUR +3 USD +4 EUR +5 HUF +6 USD +7 USD +8 PLN +9 PLN +10 CZK +10 EUR +12 NULL +20 EUR +30 USD +40 EUR +50 HUF +60 USD +70 USD +80 PLN +90 PLN +100 CZK +110 NULL +PREHOOK: query: create external table tbl_target_truncate_bigint (a int, ccy bigint) partitioned by spec (truncate(2, ccy)) stored by iceberg stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl_target_truncate_bigint +POSTHOOK: query: create external table tbl_target_truncate_bigint (a int, ccy bigint) partitioned by spec (truncate(2, ccy)) stored by iceberg stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl_target_truncate_bigint +PREHOOK: query: explain insert into table tbl_target_truncate_bigint select a, c from tbl_src 
+PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_src +PREHOOK: Output: default@tbl_target_truncate_bigint +POSTHOOK: query: explain insert into table tbl_target_truncate_bigint select a, c from tbl_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_src +POSTHOOK: Output: default@tbl_target_truncate_bigint +Plan optimized by CBO. + +Vertex dependency in root stage +Reducer 2 <- Map 1 (SIMPLE_EDGE) +Reducer 3 <- Map 1 (CUSTOM_SIMPLE_EDGE) + +Stage-3 + Stats Work{} + Stage-0 + Move Operator + table:{"name:":"default.tbl_target_truncate_bigint"} + Stage-2 + Dependency Collection{} + Stage-1 + Reducer 2 vectorized + File Output Operator [FS_18] + table:{"name:":"default.tbl_target_truncate_bigint"} + Select Operator [SEL_17] + Output:["_col0","_col1","iceberg_truncate(_col1, 2)"] + <-Map 1 [SIMPLE_EDGE] vectorized + PARTITION_ONLY_SHUFFLE [RS_13] + PartitionCols:iceberg_truncate(_col1, 2) + Select Operator [SEL_12] (rows=22 width=11) + Output:["_col0","_col1"] + TableScan [TS_0] (rows=22 width=11) + default@tbl_src,tbl_src,Tbl:COMPLETE,Col:COMPLETE,Output:["a","c"] + Reducer 3 vectorized + File Output Operator [FS_21] + Select Operator [SEL_20] (rows=1 width=528) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11"] + Group By Operator [GBY_19] (rows=1 width=336) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","count(VALUE._col2)","count(VALUE._col3)","compute_bit_vector_hll(VALUE._col4)","min(VALUE._col5)","max(VALUE._col6)","count(VALUE._col7)","compute_bit_vector_hll(VALUE._col8)"] + <-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized + PARTITION_ONLY_SHUFFLE [RS_16] + Group By Operator [GBY_15] (rows=1 width=336) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8"],aggregations:["min(a)","max(a)","count(1)","count(a)","compute_bit_vector_hll(a)","min(ccy)","max(ccy)","count(ccy)","compute_bit_vector_hll(ccy)"] + Select Operator [SEL_14] (rows=22 width=11) + Output:["a","ccy"] + Please refer to the previous Select Operator [SEL_12] + +PREHOOK: query: insert into table tbl_target_truncate_bigint select a, c from tbl_src +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_src +PREHOOK: Output: default@tbl_target_truncate_bigint +POSTHOOK: query: insert into table tbl_target_truncate_bigint select a, c from tbl_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_src +POSTHOOK: Output: default@tbl_target_truncate_bigint +PREHOOK: query: select * from tbl_target_truncate_bigint order by a, ccy +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_target_truncate_bigint +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from tbl_target_truncate_bigint order by a, ccy +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_target_truncate_bigint +POSTHOOK: Output: hdfs://### HDFS PATH ### +1 10 +2 10 +3 11 +4 12 +5 30 +6 10 +7 100 +8 20 +9 11 +10 5 +10 12 +12 NULL +20 11 +30 100 +40 10 +50 30 +60 12 +70 20 +80 100 +90 18 +100 12 +110 NULL +PREHOOK: query: create external table tbl_target_truncate_decimal (a int, b string, ccy decimal(10,6)) partitioned by spec (truncate(2, b), truncate(3, ccy)) stored by iceberg stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl_target_truncate_decimal +POSTHOOK: query: create external table tbl_target_truncate_decimal (a int, b string, ccy decimal(10,6)) partitioned by spec (truncate(2, b), truncate(3, ccy)) stored by iceberg 
stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl_target_truncate_decimal +PREHOOK: query: explain insert into table tbl_target_truncate_decimal select a, b, 1.567894 from tbl_src +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_src +PREHOOK: Output: default@tbl_target_truncate_decimal +POSTHOOK: query: explain insert into table tbl_target_truncate_decimal select a, b, 1.567894 from tbl_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_src +POSTHOOK: Output: default@tbl_target_truncate_decimal +Plan optimized by CBO. + +Vertex dependency in root stage +Reducer 2 <- Map 1 (SIMPLE_EDGE) +Reducer 3 <- Map 1 (CUSTOM_SIMPLE_EDGE) + +Stage-3 + Stats Work{} + Stage-0 + Move Operator + table:{"name:":"default.tbl_target_truncate_decimal"} + Stage-2 + Dependency Collection{} + Stage-1 + Reducer 2 vectorized + File Output Operator [FS_19] + table:{"name:":"default.tbl_target_truncate_decimal"} + Select Operator [SEL_18] + Output:["_col0","_col1","_col2","iceberg_truncate(_col1, 2)","iceberg_truncate(_col2, 3)"] + <-Map 1 [SIMPLE_EDGE] vectorized + PARTITION_ONLY_SHUFFLE [RS_14] + PartitionCols:iceberg_truncate(_col1, 2), iceberg_truncate(_col2, 3) + Select Operator [SEL_13] (rows=22 width=199) + Output:["_col0","_col1","_col2"] + TableScan [TS_0] (rows=22 width=87) + default@tbl_src,tbl_src,Tbl:COMPLETE,Col:COMPLETE,Output:["a","b"] + Reducer 3 vectorized + File Output Operator [FS_22] + Select Operator [SEL_21] (rows=1 width=1005) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"] + Group By Operator [GBY_20] (rows=1 width=708) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"],aggregations:["min(VALUE._col0)","max(VALUE._col1)","count(VALUE._col2)","count(VALUE._col3)","compute_bit_vector_hll(VALUE._col4)","max(VALUE._col5)","avg(VALUE._col6)","count(VALUE._col7)","compute_bit_vector_hll(VALUE._col8)","min(VALUE._col9)","max(VALUE._col10)","count(VALUE._col11)","compute_bit_vector_hll(VALUE._col12)"] + <-Map 1 [CUSTOM_SIMPLE_EDGE] vectorized + PARTITION_ONLY_SHUFFLE [RS_17] + Group By Operator [GBY_16] (rows=1 width=776) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"],aggregations:["min(a)","max(a)","count(1)","count(a)","compute_bit_vector_hll(a)","max(length(b))","avg(COALESCE(length(b),0))","count(b)","compute_bit_vector_hll(b)","min(ccy)","max(ccy)","count(ccy)","compute_bit_vector_hll(ccy)"] + Select Operator [SEL_15] (rows=22 width=199) + Output:["a","b","ccy"] + Please refer to the previous Select Operator [SEL_13] + +PREHOOK: query: insert into table tbl_target_truncate_decimal select a, b, 1.567894 from tbl_src +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_src +PREHOOK: Output: default@tbl_target_truncate_decimal +POSTHOOK: query: insert into table tbl_target_truncate_decimal select a, b, 1.567894 from tbl_src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_src +POSTHOOK: Output: default@tbl_target_truncate_decimal +PREHOOK: query: select * from tbl_target_truncate_decimal order by a, b +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_target_truncate_decimal +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from tbl_target_truncate_decimal order by a, b +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_target_truncate_decimal +POSTHOOK: 
Output: hdfs://### HDFS PATH ### +1 EUR 1.567894 +2 EUR 1.567894 +3 USD 1.567894 +4 EUR 1.567894 +5 HUF 1.567894 +6 USD 1.567894 +7 USD 1.567894 +8 PLN 1.567894 +9 PLN 1.567894 +10 CZK 1.567894 +10 EUR 1.567894 +12 NULL 1.567894 +20 EUR 1.567894 +30 USD 1.567894 +40 EUR 1.567894 +50 HUF 1.567894 +60 USD 1.567894 +70 USD 1.567894 +80 PLN 1.567894 +90 PLN 1.567894 +100 CZK 1.567894 +110 NULL 1.567894 +PREHOOK: query: create external table tbl_year_date (id string, date_time_date date, year_partition int) + partitioned by spec (year_partition, year(date_time_date)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl_year_date +POSTHOOK: query: create external table tbl_year_date (id string, date_time_date date, year_partition int) + partitioned by spec (year_partition, year(date_time_date)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl_year_date +PREHOOK: query: explain insert into tbl_year_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tbl_year_date +POSTHOOK: query: explain insert into tbl_year_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tbl_year_date +Plan optimized by CBO. + +Vertex dependency in root stage +Reducer 2 <- Map 1 (SIMPLE_EDGE) +Reducer 3 <- Map 1 (CUSTOM_SIMPLE_EDGE) + +Stage-3 + Stats Work{} + Stage-0 + Move Operator + table:{"name:":"default.tbl_year_date"} + Stage-2 + Dependency Collection{} + Stage-1 + Reducer 2 vectorized + File Output Operator [FS_16] + table:{"name:":"default.tbl_year_date"} + Select Operator [SEL_15] + Output:["_col0","_col1","_col2","_col2","iceberg_year(_col1)"] + <-Map 1 [SIMPLE_EDGE] + PARTITION_ONLY_SHUFFLE [RS_13] + PartitionCols:_col2, iceberg_year(_col1) + Select Operator [SEL_3] (rows=1 width=240) + Output:["_col0","_col1","_col2"] + UDTF Operator [UDTF_2] (rows=1 width=64) + function name:inline + Select Operator [SEL_1] (rows=1 width=64) + Output:["_col0"] + TableScan [TS_0] (rows=1 width=10) + _dummy_database@_dummy_table,_dummy_table,Tbl:COMPLETE,Col:COMPLETE + Reducer 3 vectorized + File Output Operator [FS_19] + Select Operator [SEL_18] (rows=1 width=890) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"] + Group By Operator [GBY_17] (rows=1 width=596) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"],aggregations:["max(VALUE._col0)","avg(VALUE._col1)","count(VALUE._col2)","count(VALUE._col3)","compute_bit_vector_hll(VALUE._col4)","min(VALUE._col5)","max(VALUE._col6)","count(VALUE._col7)","compute_bit_vector_hll(VALUE._col8)","min(VALUE._col9)","max(VALUE._col10)","count(VALUE._col11)","compute_bit_vector_hll(VALUE._col12)"] + <-Map 1 [CUSTOM_SIMPLE_EDGE] + PARTITION_ONLY_SHUFFLE [RS_9] + Group By Operator [GBY_8] (rows=1 width=664) + 
Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"],aggregations:["max(length(id))","avg(COALESCE(length(id),0))","count(1)","count(id)","compute_bit_vector_hll(id)","min(date_time_date)","max(date_time_date)","count(date_time_date)","compute_bit_vector_hll(date_time_date)","min(year_partition)","max(year_partition)","count(year_partition)","compute_bit_vector_hll(year_partition)"] + Select Operator [SEL_7] (rows=1 width=240) + Output:["id","date_time_date","year_partition"] + Please refer to the previous Select Operator [SEL_3] + +PREHOOK: query: insert into tbl_year_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tbl_year_date +POSTHOOK: query: insert into tbl_year_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tbl_year_date +PREHOOK: query: select * from tbl_year_date order by id, date_time_date +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_year_date +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from tbl_year_date order by id, date_time_date +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_year_date +POSTHOOK: Output: hdfs://### HDFS PATH ### +40568 2018-02-12 2018 +40568 2018-07-03 2018 +88669 2018-05-27 2018 +PREHOOK: query: create external table tbl_year_timestamp (id string, date_time_timestamp timestamp, year_partition int) + partitioned by spec (year_partition, year(date_time_timestamp)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl_year_timestamp +POSTHOOK: query: create external table tbl_year_timestamp (id string, date_time_timestamp timestamp, year_partition int) + partitioned by spec (year_partition, year(date_time_timestamp)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl_year_timestamp +PREHOOK: query: explain insert into tbl_year_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tbl_year_timestamp +POSTHOOK: query: explain insert into tbl_year_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tbl_year_timestamp +Plan optimized by CBO. 
+ +Vertex dependency in root stage +Reducer 2 <- Map 1 (SIMPLE_EDGE) +Reducer 3 <- Map 1 (CUSTOM_SIMPLE_EDGE) + +Stage-3 + Stats Work{} + Stage-0 + Move Operator + table:{"name:":"default.tbl_year_timestamp"} + Stage-2 + Dependency Collection{} + Stage-1 + Reducer 2 vectorized + File Output Operator [FS_16] + table:{"name:":"default.tbl_year_timestamp"} + Select Operator [SEL_15] + Output:["_col0","_col1","_col2","_col2","iceberg_year(_col1)"] + <-Map 1 [SIMPLE_EDGE] + PARTITION_ONLY_SHUFFLE [RS_13] + PartitionCols:_col2, iceberg_year(_col1) + Select Operator [SEL_3] (rows=1 width=224) + Output:["_col0","_col1","_col2"] + UDTF Operator [UDTF_2] (rows=1 width=64) + function name:inline + Select Operator [SEL_1] (rows=1 width=64) + Output:["_col0"] + TableScan [TS_0] (rows=1 width=10) + _dummy_database@_dummy_table,_dummy_table,Tbl:COMPLETE,Col:COMPLETE + Reducer 3 vectorized + File Output Operator [FS_19] + Select Operator [SEL_18] (rows=1 width=863) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"] + Group By Operator [GBY_17] (rows=1 width=564) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"],aggregations:["max(VALUE._col0)","avg(VALUE._col1)","count(VALUE._col2)","count(VALUE._col3)","compute_bit_vector_hll(VALUE._col4)","min(VALUE._col5)","max(VALUE._col6)","count(VALUE._col7)","compute_bit_vector_hll(VALUE._col8)","min(VALUE._col9)","max(VALUE._col10)","count(VALUE._col11)","compute_bit_vector_hll(VALUE._col12)"] + <-Map 1 [CUSTOM_SIMPLE_EDGE] + PARTITION_ONLY_SHUFFLE [RS_9] + Group By Operator [GBY_8] (rows=1 width=632) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"],aggregations:["max(length(id))","avg(COALESCE(length(id),0))","count(1)","count(id)","compute_bit_vector_hll(id)","min(date_time_timestamp)","max(date_time_timestamp)","count(date_time_timestamp)","compute_bit_vector_hll(date_time_timestamp)","min(year_partition)","max(year_partition)","count(year_partition)","compute_bit_vector_hll(year_partition)"] + Select Operator [SEL_7] (rows=1 width=224) + Output:["id","date_time_timestamp","year_partition"] + Please refer to the previous Select Operator [SEL_3] + +PREHOOK: query: insert into tbl_year_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tbl_year_timestamp +POSTHOOK: query: insert into tbl_year_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tbl_year_timestamp +PREHOOK: query: select * from tbl_year_timestamp order by id, date_time_timestamp +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_year_timestamp +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from tbl_year_timestamp order by id, date_time_timestamp +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_year_timestamp +POSTHOOK: Output: hdfs://### HDFS PATH ### +40568 2018-02-12 12:45:56 2018 +40568 2018-07-03 06:07:56 2018 +88669 2018-05-27 11:12:00 2018 +PREHOOK: query: create external table tbl_month_date (id string, date_time_date date, year_partition int) + partitioned by spec 
(year_partition, month(date_time_date)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl_month_date +POSTHOOK: query: create external table tbl_month_date (id string, date_time_date date, year_partition int) + partitioned by spec (year_partition, month(date_time_date)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl_month_date +PREHOOK: query: explain insert into tbl_month_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tbl_month_date +POSTHOOK: query: explain insert into tbl_month_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tbl_month_date +Plan optimized by CBO. + +Vertex dependency in root stage +Reducer 2 <- Map 1 (SIMPLE_EDGE) +Reducer 3 <- Map 1 (CUSTOM_SIMPLE_EDGE) + +Stage-3 + Stats Work{} + Stage-0 + Move Operator + table:{"name:":"default.tbl_month_date"} + Stage-2 + Dependency Collection{} + Stage-1 + Reducer 2 vectorized + File Output Operator [FS_16] + table:{"name:":"default.tbl_month_date"} + Select Operator [SEL_15] + Output:["_col0","_col1","_col2","_col2","iceberg_month(_col1)"] + <-Map 1 [SIMPLE_EDGE] + PARTITION_ONLY_SHUFFLE [RS_13] + PartitionCols:_col2, iceberg_month(_col1) + Select Operator [SEL_3] (rows=1 width=240) + Output:["_col0","_col1","_col2"] + UDTF Operator [UDTF_2] (rows=1 width=64) + function name:inline + Select Operator [SEL_1] (rows=1 width=64) + Output:["_col0"] + TableScan [TS_0] (rows=1 width=10) + _dummy_database@_dummy_table,_dummy_table,Tbl:COMPLETE,Col:COMPLETE + Reducer 3 vectorized + File Output Operator [FS_19] + Select Operator [SEL_18] (rows=1 width=890) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"] + Group By Operator [GBY_17] (rows=1 width=596) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"],aggregations:["max(VALUE._col0)","avg(VALUE._col1)","count(VALUE._col2)","count(VALUE._col3)","compute_bit_vector_hll(VALUE._col4)","min(VALUE._col5)","max(VALUE._col6)","count(VALUE._col7)","compute_bit_vector_hll(VALUE._col8)","min(VALUE._col9)","max(VALUE._col10)","count(VALUE._col11)","compute_bit_vector_hll(VALUE._col12)"] + <-Map 1 [CUSTOM_SIMPLE_EDGE] + PARTITION_ONLY_SHUFFLE [RS_9] + Group By Operator [GBY_8] (rows=1 width=664) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"],aggregations:["max(length(id))","avg(COALESCE(length(id),0))","count(1)","count(id)","compute_bit_vector_hll(id)","min(date_time_date)","max(date_time_date)","count(date_time_date)","compute_bit_vector_hll(date_time_date)","min(year_partition)","max(year_partition)","count(year_partition)","compute_bit_vector_hll(year_partition)"] + Select Operator [SEL_7] (rows=1 width=240) + Output:["id","date_time_date","year_partition"] + Please refer to the previous Select Operator [SEL_3] + +PREHOOK: query: insert into tbl_month_date values 
(88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tbl_month_date +POSTHOOK: query: insert into tbl_month_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tbl_month_date +PREHOOK: query: select * from tbl_month_date order by id, date_time_date +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_month_date +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from tbl_month_date order by id, date_time_date +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_month_date +POSTHOOK: Output: hdfs://### HDFS PATH ### +40568 2018-02-12 2018 +40568 2018-07-03 2018 +88669 2018-05-27 2018 +PREHOOK: query: create external table tbl_month_timestamp (id string, date_time_timestamp timestamp, year_partition int) + partitioned by spec (year_partition, month(date_time_timestamp)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl_month_timestamp +POSTHOOK: query: create external table tbl_month_timestamp (id string, date_time_timestamp timestamp, year_partition int) + partitioned by spec (year_partition, month(date_time_timestamp)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl_month_timestamp +PREHOOK: query: explain insert into tbl_month_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tbl_month_timestamp +POSTHOOK: query: explain insert into tbl_month_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tbl_month_timestamp +Plan optimized by CBO. 
+ +Vertex dependency in root stage +Reducer 2 <- Map 1 (SIMPLE_EDGE) +Reducer 3 <- Map 1 (CUSTOM_SIMPLE_EDGE) + +Stage-3 + Stats Work{} + Stage-0 + Move Operator + table:{"name:":"default.tbl_month_timestamp"} + Stage-2 + Dependency Collection{} + Stage-1 + Reducer 2 vectorized + File Output Operator [FS_16] + table:{"name:":"default.tbl_month_timestamp"} + Select Operator [SEL_15] + Output:["_col0","_col1","_col2","_col2","iceberg_month(_col1)"] + <-Map 1 [SIMPLE_EDGE] + PARTITION_ONLY_SHUFFLE [RS_13] + PartitionCols:_col2, iceberg_month(_col1) + Select Operator [SEL_3] (rows=1 width=224) + Output:["_col0","_col1","_col2"] + UDTF Operator [UDTF_2] (rows=1 width=64) + function name:inline + Select Operator [SEL_1] (rows=1 width=64) + Output:["_col0"] + TableScan [TS_0] (rows=1 width=10) + _dummy_database@_dummy_table,_dummy_table,Tbl:COMPLETE,Col:COMPLETE + Reducer 3 vectorized + File Output Operator [FS_19] + Select Operator [SEL_18] (rows=1 width=863) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"] + Group By Operator [GBY_17] (rows=1 width=564) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"],aggregations:["max(VALUE._col0)","avg(VALUE._col1)","count(VALUE._col2)","count(VALUE._col3)","compute_bit_vector_hll(VALUE._col4)","min(VALUE._col5)","max(VALUE._col6)","count(VALUE._col7)","compute_bit_vector_hll(VALUE._col8)","min(VALUE._col9)","max(VALUE._col10)","count(VALUE._col11)","compute_bit_vector_hll(VALUE._col12)"] + <-Map 1 [CUSTOM_SIMPLE_EDGE] + PARTITION_ONLY_SHUFFLE [RS_9] + Group By Operator [GBY_8] (rows=1 width=632) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"],aggregations:["max(length(id))","avg(COALESCE(length(id),0))","count(1)","count(id)","compute_bit_vector_hll(id)","min(date_time_timestamp)","max(date_time_timestamp)","count(date_time_timestamp)","compute_bit_vector_hll(date_time_timestamp)","min(year_partition)","max(year_partition)","count(year_partition)","compute_bit_vector_hll(year_partition)"] + Select Operator [SEL_7] (rows=1 width=224) + Output:["id","date_time_timestamp","year_partition"] + Please refer to the previous Select Operator [SEL_3] + +PREHOOK: query: insert into tbl_month_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tbl_month_timestamp +POSTHOOK: query: insert into tbl_month_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tbl_month_timestamp +PREHOOK: query: select * from tbl_month_timestamp order by id, date_time_timestamp +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_month_timestamp +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from tbl_month_timestamp order by id, date_time_timestamp +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_month_timestamp +POSTHOOK: Output: hdfs://### HDFS PATH ### +40568 2018-02-12 12:45:56 2018 +40568 2018-07-03 06:07:56 2018 +88669 2018-05-27 11:12:00 2018 +PREHOOK: query: create external table tbl_day_date (id string, date_time_date date, year_partition int) + partitioned by spec 
(year_partition, day(date_time_date)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl_day_date +POSTHOOK: query: create external table tbl_day_date (id string, date_time_date date, year_partition int) + partitioned by spec (year_partition, day(date_time_date)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl_day_date +PREHOOK: query: explain insert into tbl_day_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tbl_day_date +POSTHOOK: query: explain insert into tbl_day_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tbl_day_date +Plan optimized by CBO. + +Vertex dependency in root stage +Reducer 2 <- Map 1 (SIMPLE_EDGE) +Reducer 3 <- Map 1 (CUSTOM_SIMPLE_EDGE) + +Stage-3 + Stats Work{} + Stage-0 + Move Operator + table:{"name:":"default.tbl_day_date"} + Stage-2 + Dependency Collection{} + Stage-1 + Reducer 2 vectorized + File Output Operator [FS_16] + table:{"name:":"default.tbl_day_date"} + Select Operator [SEL_15] + Output:["_col0","_col1","_col2","_col2","iceberg_day(_col1)"] + <-Map 1 [SIMPLE_EDGE] + PARTITION_ONLY_SHUFFLE [RS_13] + PartitionCols:_col2, iceberg_day(_col1) + Select Operator [SEL_3] (rows=1 width=240) + Output:["_col0","_col1","_col2"] + UDTF Operator [UDTF_2] (rows=1 width=64) + function name:inline + Select Operator [SEL_1] (rows=1 width=64) + Output:["_col0"] + TableScan [TS_0] (rows=1 width=10) + _dummy_database@_dummy_table,_dummy_table,Tbl:COMPLETE,Col:COMPLETE + Reducer 3 vectorized + File Output Operator [FS_19] + Select Operator [SEL_18] (rows=1 width=890) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"] + Group By Operator [GBY_17] (rows=1 width=596) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"],aggregations:["max(VALUE._col0)","avg(VALUE._col1)","count(VALUE._col2)","count(VALUE._col3)","compute_bit_vector_hll(VALUE._col4)","min(VALUE._col5)","max(VALUE._col6)","count(VALUE._col7)","compute_bit_vector_hll(VALUE._col8)","min(VALUE._col9)","max(VALUE._col10)","count(VALUE._col11)","compute_bit_vector_hll(VALUE._col12)"] + <-Map 1 [CUSTOM_SIMPLE_EDGE] + PARTITION_ONLY_SHUFFLE [RS_9] + Group By Operator [GBY_8] (rows=1 width=664) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"],aggregations:["max(length(id))","avg(COALESCE(length(id),0))","count(1)","count(id)","compute_bit_vector_hll(id)","min(date_time_date)","max(date_time_date)","count(date_time_date)","compute_bit_vector_hll(date_time_date)","min(year_partition)","max(year_partition)","count(year_partition)","compute_bit_vector_hll(year_partition)"] + Select Operator [SEL_7] (rows=1 width=240) + Output:["id","date_time_date","year_partition"] + Please refer to the previous Select Operator [SEL_3] + +PREHOOK: query: insert into tbl_day_date values (88669, '2018-05-27', 2018), 
(40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tbl_day_date +POSTHOOK: query: insert into tbl_day_date values (88669, '2018-05-27', 2018), (40568, '2018-02-12', 2018), (40568, '2018-07-03', 2018) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tbl_day_date +PREHOOK: query: select * from tbl_day_date order by id, date_time_date +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_day_date +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from tbl_day_date order by id, date_time_date +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_day_date +POSTHOOK: Output: hdfs://### HDFS PATH ### +40568 2018-02-12 2018 +40568 2018-07-03 2018 +88669 2018-05-27 2018 +PREHOOK: query: create external table tbl_day_timestamp (id string, date_time_timestamp timestamp, year_partition int) + partitioned by spec (year_partition, day(date_time_timestamp)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl_day_timestamp +POSTHOOK: query: create external table tbl_day_timestamp (id string, date_time_timestamp timestamp, year_partition int) + partitioned by spec (year_partition, day(date_time_timestamp)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl_day_timestamp +PREHOOK: query: explain insert into tbl_day_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tbl_day_timestamp +POSTHOOK: query: explain insert into tbl_day_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tbl_day_timestamp +Plan optimized by CBO. 
+ +Vertex dependency in root stage +Reducer 2 <- Map 1 (SIMPLE_EDGE) +Reducer 3 <- Map 1 (CUSTOM_SIMPLE_EDGE) + +Stage-3 + Stats Work{} + Stage-0 + Move Operator + table:{"name:":"default.tbl_day_timestamp"} + Stage-2 + Dependency Collection{} + Stage-1 + Reducer 2 vectorized + File Output Operator [FS_16] + table:{"name:":"default.tbl_day_timestamp"} + Select Operator [SEL_15] + Output:["_col0","_col1","_col2","_col2","iceberg_day(_col1)"] + <-Map 1 [SIMPLE_EDGE] + PARTITION_ONLY_SHUFFLE [RS_13] + PartitionCols:_col2, iceberg_day(_col1) + Select Operator [SEL_3] (rows=1 width=224) + Output:["_col0","_col1","_col2"] + UDTF Operator [UDTF_2] (rows=1 width=64) + function name:inline + Select Operator [SEL_1] (rows=1 width=64) + Output:["_col0"] + TableScan [TS_0] (rows=1 width=10) + _dummy_database@_dummy_table,_dummy_table,Tbl:COMPLETE,Col:COMPLETE + Reducer 3 vectorized + File Output Operator [FS_19] + Select Operator [SEL_18] (rows=1 width=863) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"] + Group By Operator [GBY_17] (rows=1 width=564) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"],aggregations:["max(VALUE._col0)","avg(VALUE._col1)","count(VALUE._col2)","count(VALUE._col3)","compute_bit_vector_hll(VALUE._col4)","min(VALUE._col5)","max(VALUE._col6)","count(VALUE._col7)","compute_bit_vector_hll(VALUE._col8)","min(VALUE._col9)","max(VALUE._col10)","count(VALUE._col11)","compute_bit_vector_hll(VALUE._col12)"] + <-Map 1 [CUSTOM_SIMPLE_EDGE] + PARTITION_ONLY_SHUFFLE [RS_9] + Group By Operator [GBY_8] (rows=1 width=632) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"],aggregations:["max(length(id))","avg(COALESCE(length(id),0))","count(1)","count(id)","compute_bit_vector_hll(id)","min(date_time_timestamp)","max(date_time_timestamp)","count(date_time_timestamp)","compute_bit_vector_hll(date_time_timestamp)","min(year_partition)","max(year_partition)","count(year_partition)","compute_bit_vector_hll(year_partition)"] + Select Operator [SEL_7] (rows=1 width=224) + Output:["id","date_time_timestamp","year_partition"] + Please refer to the previous Select Operator [SEL_3] + +PREHOOK: query: insert into tbl_day_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tbl_day_timestamp +POSTHOOK: query: insert into tbl_day_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tbl_day_timestamp +PREHOOK: query: select * from tbl_day_timestamp order by id, date_time_timestamp +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_day_timestamp +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from tbl_day_timestamp order by id, date_time_timestamp +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_day_timestamp +POSTHOOK: Output: hdfs://### HDFS PATH ### +40568 2018-02-12 12:45:56 2018 +40568 2018-07-03 06:07:56 2018 +88669 2018-05-27 11:12:00 2018 +PREHOOK: query: create external table tbl_hour_timestamp (id string, date_time_timestamp timestamp, year_partition int) + partitioned by spec 
(year_partition, hour(date_time_timestamp)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tbl_hour_timestamp +POSTHOOK: query: create external table tbl_hour_timestamp (id string, date_time_timestamp timestamp, year_partition int) + partitioned by spec (year_partition, hour(date_time_timestamp)) +stored by iceberg stored as parquet +tblproperties ('parquet.compression'='snappy','format-version'='2') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tbl_hour_timestamp +PREHOOK: query: explain insert into tbl_hour_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tbl_hour_timestamp +POSTHOOK: query: explain insert into tbl_hour_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tbl_hour_timestamp +Plan optimized by CBO. + +Vertex dependency in root stage +Reducer 2 <- Map 1 (SIMPLE_EDGE) +Reducer 3 <- Map 1 (CUSTOM_SIMPLE_EDGE) + +Stage-3 + Stats Work{} + Stage-0 + Move Operator + table:{"name:":"default.tbl_hour_timestamp"} + Stage-2 + Dependency Collection{} + Stage-1 + Reducer 2 vectorized + File Output Operator [FS_16] + table:{"name:":"default.tbl_hour_timestamp"} + Select Operator [SEL_15] + Output:["_col0","_col1","_col2","_col2","iceberg_hour(_col1)"] + <-Map 1 [SIMPLE_EDGE] + PARTITION_ONLY_SHUFFLE [RS_13] + PartitionCols:_col2, iceberg_hour(_col1) + Select Operator [SEL_3] (rows=1 width=224) + Output:["_col0","_col1","_col2"] + UDTF Operator [UDTF_2] (rows=1 width=64) + function name:inline + Select Operator [SEL_1] (rows=1 width=64) + Output:["_col0"] + TableScan [TS_0] (rows=1 width=10) + _dummy_database@_dummy_table,_dummy_table,Tbl:COMPLETE,Col:COMPLETE + Reducer 3 vectorized + File Output Operator [FS_19] + Select Operator [SEL_18] (rows=1 width=863) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12","_col13","_col14","_col15","_col16","_col17"] + Group By Operator [GBY_17] (rows=1 width=564) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"],aggregations:["max(VALUE._col0)","avg(VALUE._col1)","count(VALUE._col2)","count(VALUE._col3)","compute_bit_vector_hll(VALUE._col4)","min(VALUE._col5)","max(VALUE._col6)","count(VALUE._col7)","compute_bit_vector_hll(VALUE._col8)","min(VALUE._col9)","max(VALUE._col10)","count(VALUE._col11)","compute_bit_vector_hll(VALUE._col12)"] + <-Map 1 [CUSTOM_SIMPLE_EDGE] + PARTITION_ONLY_SHUFFLE [RS_9] + Group By Operator [GBY_8] (rows=1 width=632) + Output:["_col0","_col1","_col2","_col3","_col4","_col5","_col6","_col7","_col8","_col9","_col10","_col11","_col12"],aggregations:["max(length(id))","avg(COALESCE(length(id),0))","count(1)","count(id)","compute_bit_vector_hll(id)","min(date_time_timestamp)","max(date_time_timestamp)","count(date_time_timestamp)","compute_bit_vector_hll(date_time_timestamp)","min(year_partition)","max(year_partition)","count(year_partition)","compute_bit_vector_hll(year_partition)"] + Select Operator [SEL_7] (rows=1 width=224) + 
Output:["id","date_time_timestamp","year_partition"] + Please refer to the previous Select Operator [SEL_3] + +PREHOOK: query: insert into tbl_hour_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@tbl_hour_timestamp +POSTHOOK: query: insert into tbl_hour_timestamp values (88669, '2018-05-27 11:12:00', 2018), (40568, '2018-02-12 12:45:56', 2018), (40568, '2018-07-03 06:07:56', 2018) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@tbl_hour_timestamp +PREHOOK: query: select * from tbl_hour_timestamp order by id, date_time_timestamp +PREHOOK: type: QUERY +PREHOOK: Input: default@tbl_hour_timestamp +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from tbl_hour_timestamp order by id, date_time_timestamp +POSTHOOK: type: QUERY +POSTHOOK: Input: default@tbl_hour_timestamp +POSTHOOK: Output: hdfs://### HDFS PATH ### +40568 2018-02-12 12:45:56 2018 +40568 2018-07-03 06:07:56 2018 +88669 2018-05-27 11:12:00 2018 diff --git a/iceberg/iceberg-handler/src/test/results/positive/iceberg_partition_vectorized_read.q.out b/iceberg/iceberg-handler/src/test/results/positive/iceberg_partition_vectorized_read.q.out new file mode 100644 index 000000000000..3cc643380fd2 --- /dev/null +++ b/iceberg/iceberg-handler/src/test/results/positive/iceberg_partition_vectorized_read.q.out @@ -0,0 +1,139 @@ +PREHOOK: query: CREATE EXTERNAL TABLE ice_date (`col1` int, `day` date, `calday` date) PARTITIONED BY SPEC (calday) stored by +iceberg tblproperties('format-version'='2') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ice_date +POSTHOOK: query: CREATE EXTERNAL TABLE ice_date (`col1` int, `day` date, `calday` date) PARTITIONED BY SPEC (calday) stored by +iceberg tblproperties('format-version'='2') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ice_date +PREHOOK: query: insert into ice_date values(1, '2020-11-20', '2020-11-20'), (1, '2020-11-20', '2020-11-20') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_date +POSTHOOK: query: insert into ice_date values(1, '2020-11-20', '2020-11-20'), (1, '2020-11-20', '2020-11-20') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_date +PREHOOK: query: select * from ice_date +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_date +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from ice_date +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_date +POSTHOOK: Output: hdfs://### HDFS PATH ### +1 2020-11-20 2020-11-20 +1 2020-11-20 2020-11-20 +PREHOOK: query: select count(calday) from ice_date +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_date +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select count(calday) from ice_date +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_date +POSTHOOK: Output: hdfs://### HDFS PATH ### +2 +PREHOOK: query: select distinct(calday) from ice_date +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_date +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select distinct(calday) from ice_date +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_date +POSTHOOK: Output: hdfs://### HDFS PATH ### +2020-11-20 +PREHOOK: query: CREATE EXTERNAL TABLE ice_timestamp (`col1` int, `day` date, 
`times` timestamp) PARTITIONED BY SPEC (times) stored +by iceberg tblproperties('format-version'='2') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ice_timestamp +POSTHOOK: query: CREATE EXTERNAL TABLE ice_timestamp (`col1` int, `day` date, `times` timestamp) PARTITIONED BY SPEC (times) stored +by iceberg tblproperties('format-version'='2') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ice_timestamp +PREHOOK: query: insert into ice_timestamp values(1, '2020-11-20', '2020-11-20'), (1, '2020-11-20', '2020-11-20') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_timestamp +POSTHOOK: query: insert into ice_timestamp values(1, '2020-11-20', '2020-11-20'), (1, '2020-11-20', '2020-11-20') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_timestamp +PREHOOK: query: select * from ice_timestamp +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_timestamp +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from ice_timestamp +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_timestamp +POSTHOOK: Output: hdfs://### HDFS PATH ### +1 2020-11-20 2020-11-20 00:00:00 +1 2020-11-20 2020-11-20 00:00:00 +PREHOOK: query: select count(times) from ice_timestamp +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_timestamp +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select count(times) from ice_timestamp +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_timestamp +POSTHOOK: Output: hdfs://### HDFS PATH ### +2 +PREHOOK: query: select distinct(times) from ice_timestamp +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_timestamp +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select distinct(times) from ice_timestamp +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_timestamp +POSTHOOK: Output: hdfs://### HDFS PATH ### +2020-11-20 00:00:00 +PREHOOK: query: CREATE EXTERNAL TABLE ice_decimal (`col1` int, `decimalA` decimal(5,2), `decimalC` decimal(5,2)) PARTITIONED BY SPEC +(decimalC) stored by iceberg tblproperties('format-version'='2') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ice_decimal +POSTHOOK: query: CREATE EXTERNAL TABLE ice_decimal (`col1` int, `decimalA` decimal(5,2), `decimalC` decimal(5,2)) PARTITIONED BY SPEC +(decimalC) stored by iceberg tblproperties('format-version'='2') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ice_decimal +PREHOOK: query: insert into ice_decimal values(1, 122.91, 102.21), (1, 12.32, 200.12) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_decimal +POSTHOOK: query: insert into ice_decimal values(1, 122.91, 102.21), (1, 12.32, 200.12) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_decimal +PREHOOK: query: select * from ice_decimal +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_decimal +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from ice_decimal +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_decimal +POSTHOOK: Output: hdfs://### HDFS PATH ### +1 122.91 102.21 +1 12.32 200.12 +PREHOOK: query: select distinct(decimalc) from ice_decimal +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_decimal +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select distinct(decimalc) from ice_decimal +POSTHOOK: type: 
QUERY +POSTHOOK: Input: default@ice_decimal +POSTHOOK: Output: hdfs://### HDFS PATH ### +102.21 +200.12 +PREHOOK: query: select count(decimala) from ice_decimal where decimala=122.91 +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_decimal +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select count(decimala) from ice_decimal where decimala=122.91 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_decimal +POSTHOOK: Output: hdfs://### HDFS PATH ### +1 diff --git a/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_major_compaction_partition_evolution.q.out b/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_major_compaction_partition_evolution.q.out new file mode 100644 index 000000000000..a5ead609d451 --- /dev/null +++ b/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_major_compaction_partition_evolution.q.out @@ -0,0 +1,377 @@ +PREHOOK: query: create table ice_orc ( + first_name string, + last_name string, + dept_id bigint, + team_id bigint + ) +partitioned by (company_id bigint) +stored by iceberg stored as orc +tblproperties ('format-version'='2') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ice_orc +POSTHOOK: query: create table ice_orc ( + first_name string, + last_name string, + dept_id bigint, + team_id bigint + ) +partitioned by (company_id bigint) +stored by iceberg stored as orc +tblproperties ('format-version'='2') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn1','ln1', 1, 10, 100) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn1','ln1', 1, 10, 100) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn2','ln2', 1, 10, 100) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn2','ln2', 1, 10, 100) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn3','ln3', 1, 11, 100) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn3','ln3', 1, 11, 100) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: alter table ice_orc set partition spec(company_id, dept_id) +PREHOOK: type: ALTERTABLE_SETPARTSPEC +PREHOOK: Input: default@ice_orc +POSTHOOK: query: alter table ice_orc set partition spec(company_id, dept_id) +POSTHOOK: type: ALTERTABLE_SETPARTSPEC +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn4','ln4', 1, 11, 100) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn4','ln4', 1, 11, 100) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn5','ln5', 2, 20, 100) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn5','ln5', 2, 20, 100) +POSTHOOK: type: QUERY +POSTHOOK: 
Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn6','ln6', 2, 20, 100) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn6','ln6', 2, 20, 100) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: alter table ice_orc set partition spec(company_id, dept_id, team_id) +PREHOOK: type: ALTERTABLE_SETPARTSPEC +PREHOOK: Input: default@ice_orc +POSTHOOK: query: alter table ice_orc set partition spec(company_id, dept_id, team_id) +POSTHOOK: type: ALTERTABLE_SETPARTSPEC +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn7','ln7', 2, 21, 100) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn7','ln7', 2, 21, 100) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn8','ln8', 2, 21, 100) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn8','ln8', 2, 21, 100) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln1a' where first_name='fn1' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln1a' where first_name='fn1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln2a' where first_name='fn2' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln2a' where first_name='fn2' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln3a' where first_name='fn3' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln3a' where first_name='fn3' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln4a' where first_name='fn4' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln4a' where first_name='fn4' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: alter table ice_orc set partition spec(company_id, dept_id) +PREHOOK: type: ALTERTABLE_SETPARTSPEC +PREHOOK: Input: default@ice_orc +POSTHOOK: query: alter table ice_orc set partition spec(company_id, dept_id) +POSTHOOK: type: ALTERTABLE_SETPARTSPEC +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln5a' where first_name='fn5' +PREHOOK: type: QUERY +PREHOOK: Input: 
default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln5a' where first_name='fn5' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln6a' where first_name='fn6' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln6a' where first_name='fn6' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln7a' where first_name='fn7' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln7a' where first_name='fn7' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln8a' where first_name='fn8' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln8a' where first_name='fn8' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: delete from ice_orc where last_name in ('ln1a', 'ln8a') +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: delete from ice_orc where last_name in ('ln1a', 'ln8a') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +PREHOOK: query: select * from ice_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from ice_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +fn2 ln2a 1 10 100 +fn3 ln3a 1 11 100 +fn4 ln4a 1 11 100 +fn5 ln5a 2 20 100 +fn6 ln6a 2 20 100 +fn7 ln7a 2 21 100 +PREHOOK: query: describe formatted ice_orc +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ice_orc +POSTHOOK: query: describe formatted ice_orc +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ice_orc +# col_name data_type comment +first_name string +last_name string +dept_id bigint +team_id bigint +company_id bigint + +# Partition Transform Information +# col_name transform_type +company_id IDENTITY +dept_id IDENTITY + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: EXTERNAL_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + EXTERNAL TRUE + bucketing_version 2 + current-schema {\"type\":\"struct\",\"schema-id\":0,\"fields\":[{\"id\":1,\"name\":\"first_name\",\"required\":false,\"type\":\"string\"},{\"id\":2,\"name\":\"last_name\",\"required\":false,\"type\":\"string\"},{\"id\":3,\"name\":\"dept_id\",\"required\":false,\"type\":\"long\"},{\"id\":4,\"name\":\"team_id\",\"required\":false,\"type\":\"long\"},{\"id\":5,\"name\":\"company_id\",\"required\":false,\"type\":\"long\"}]} + current-snapshot-id #Masked# + current-snapshot-summary 
{\"deleted-data-files\":\"2\",\"deleted-records\":\"2\",\"removed-files-size\":\"1256\",\"changed-partition-count\":\"2\",\"total-records\":\"14\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"14\",\"total-delete-files\":\"8\",\"total-position-deletes\":\"8\",\"total-equality-deletes\":\"0\"} + current-snapshot-timestamp-ms #Masked# + default-partition-spec {\"spec-id\":1,\"fields\":[{\"name\":\"company_id\",\"transform\":\"identity\",\"source-id\":5,\"field-id\":1000},{\"name\":\"dept_id\",\"transform\":\"identity\",\"source-id\":3,\"field-id\":1001}]} + format-version 2 + iceberg.orc.files.only true +#### A masked pattern was here #### + numFiles 14 + numRows 14 + parquet.compression zstd +#### A masked pattern was here #### + rawDataSize 0 + serialization.format 1 + snapshot-count 17 + storage_handler org.apache.iceberg.mr.hive.HiveIcebergStorageHandler + table_type ICEBERG + totalSize #Masked# +#### A masked pattern was here #### + uuid #Masked# + write.delete.mode merge-on-read + write.format.default orc + write.merge.mode merge-on-read + write.update.mode merge-on-read + +# Storage Information +SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe +InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat +OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat +Compressed: No +Sort Columns: [] +PREHOOK: query: explain alter table ice_orc COMPACT 'major' and wait +PREHOOK: type: ALTERTABLE_COMPACT +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: explain alter table ice_orc COMPACT 'major' and wait +POSTHOOK: type: ALTERTABLE_COMPACT +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Compact + compaction type: major + table name: default.ice_orc + numberOfBuckets: 0 + table name: default.ice_orc + blocking: true + +PREHOOK: query: alter table ice_orc COMPACT 'major' and wait +PREHOOK: type: ALTERTABLE_COMPACT +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: alter table ice_orc COMPACT 'major' and wait +POSTHOOK: type: ALTERTABLE_COMPACT +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: select * from ice_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from ice_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +fn2 ln2a 1 10 100 +fn3 ln3a 1 11 100 +fn4 ln4a 1 11 100 +fn5 ln5a 2 20 100 +fn6 ln6a 2 20 100 +fn7 ln7a 2 21 100 +PREHOOK: query: describe formatted ice_orc +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ice_orc +POSTHOOK: query: describe formatted ice_orc +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ice_orc +# col_name data_type comment +first_name string +last_name string +dept_id bigint +team_id bigint +company_id bigint + +# Partition Transform Information +# col_name transform_type +company_id IDENTITY +dept_id IDENTITY + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: EXTERNAL_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"company_id\":\"true\",\"dept_id\":\"true\",\"first_name\":\"true\",\"last_name\":\"true\",\"team_id\":\"true\"}} + EXTERNAL TRUE + bucketing_version 2 + current-schema 
{\"type\":\"struct\",\"schema-id\":0,\"fields\":[{\"id\":1,\"name\":\"first_name\",\"required\":false,\"type\":\"string\"},{\"id\":2,\"name\":\"last_name\",\"required\":false,\"type\":\"string\"},{\"id\":3,\"name\":\"dept_id\",\"required\":false,\"type\":\"long\"},{\"id\":4,\"name\":\"team_id\",\"required\":false,\"type\":\"long\"},{\"id\":5,\"name\":\"company_id\",\"required\":false,\"type\":\"long\"}]} + current-snapshot-id #Masked# + current-snapshot-summary {\"replace-partitions\":\"true\",\"added-data-files\":\"2\",\"added-records\":\"6\",\"added-files-size\":\"#Masked#\",\"changed-partition-count\":\"2\",\"total-records\":\"6\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"2\",\"total-delete-files\":\"0\",\"total-position-deletes\":\"0\",\"total-equality-deletes\":\"0\"} + current-snapshot-timestamp-ms #Masked# + default-partition-spec {\"spec-id\":1,\"fields\":[{\"name\":\"company_id\",\"transform\":\"identity\",\"source-id\":5,\"field-id\":1000},{\"name\":\"dept_id\",\"transform\":\"identity\",\"source-id\":3,\"field-id\":1001}]} + format-version 2 + iceberg.orc.files.only true +#### A masked pattern was here #### + numFiles 2 + numRows 6 + parquet.compression zstd +#### A masked pattern was here #### + rawDataSize 0 + serialization.format 1 + snapshot-count 19 + storage_handler org.apache.iceberg.mr.hive.HiveIcebergStorageHandler + table_type ICEBERG + totalSize #Masked# +#### A masked pattern was here #### + uuid #Masked# + write.delete.mode merge-on-read + write.format.default orc + write.merge.mode merge-on-read + write.update.mode merge-on-read + +# Storage Information +SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe +InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat +OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat +Compressed: No +Sort Columns: [] +PREHOOK: query: show compactions +PREHOOK: type: SHOW COMPACTIONS +POSTHOOK: query: show compactions +POSTHOOK: type: SHOW COMPACTIONS +CompactionId Database Table Partition Type State Worker host Worker Enqueue Time Start Time Duration(ms) HadoopJobId Error message Initiator host Initiator Pool name TxnId Next TxnId Commit Time Highest WriteId +#Masked# default ice_orc --- MAJOR succeeded #Masked# manual default 0 0 0 --- diff --git a/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_major_compaction_partitioned.q.out b/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_major_compaction_partitioned.q.out new file mode 100644 index 000000000000..095957023144 --- /dev/null +++ b/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_major_compaction_partitioned.q.out @@ -0,0 +1,655 @@ +PREHOOK: query: create table ice_orc ( + first_name string, + last_name string + ) +partitioned by (dept_id bigint) +stored by iceberg stored as orc +tblproperties ('format-version'='2') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ice_orc +POSTHOOK: query: create table ice_orc ( + first_name string, + last_name string + ) +partitioned by (dept_id bigint) +stored by iceberg stored as orc +tblproperties ('format-version'='2') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn1','ln1', 1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn1','ln1', 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: 
Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn2','ln2', 1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn2','ln2', 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn3','ln3', 1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn3','ln3', 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn4','ln4', 1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn4','ln4', 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn5','ln5', 2) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn5','ln5', 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn6','ln6', 2) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn6','ln6', 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn7','ln7', 2) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn7','ln7', 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln1a' where first_name='fn1' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln1a' where first_name='fn1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln2a' where first_name='fn2' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln2a' where first_name='fn2' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln3a' where first_name='fn3' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln3a' where first_name='fn3' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln4a' where first_name='fn4' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln4a' where first_name='fn4' +POSTHOOK: type: QUERY +POSTHOOK: Input: 
default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln5a' where first_name='fn5' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln5a' where first_name='fn5' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln6a' where first_name='fn6' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln6a' where first_name='fn6' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln7a' where first_name='fn7' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln7a' where first_name='fn7' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: delete from ice_orc where last_name in ('ln1a', 'ln2a', 'ln7a') +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: delete from ice_orc where last_name in ('ln1a', 'ln2a', 'ln7a') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +PREHOOK: query: select * from ice_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from ice_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +fn3 ln3a 1 +fn4 ln4a 1 +fn5 ln5a 2 +fn6 ln6a 2 +PREHOOK: query: describe formatted ice_orc +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ice_orc +POSTHOOK: query: describe formatted ice_orc +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ice_orc +# col_name data_type comment +first_name string +last_name string +dept_id bigint + +# Partition Transform Information +# col_name transform_type +dept_id IDENTITY + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: EXTERNAL_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + EXTERNAL TRUE + bucketing_version 2 + current-schema {\"type\":\"struct\",\"schema-id\":0,\"fields\":[{\"id\":1,\"name\":\"first_name\",\"required\":false,\"type\":\"string\"},{\"id\":2,\"name\":\"last_name\",\"required\":false,\"type\":\"string\"},{\"id\":3,\"name\":\"dept_id\",\"required\":false,\"type\":\"long\"}]} + current-snapshot-id #Masked# + current-snapshot-summary {\"deleted-data-files\":\"3\",\"deleted-records\":\"3\",\"removed-files-size\":\"1440\",\"changed-partition-count\":\"2\",\"total-records\":\"11\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"11\",\"total-delete-files\":\"7\",\"total-position-deletes\":\"7\",\"total-equality-deletes\":\"0\"} + current-snapshot-timestamp-ms #Masked# + default-partition-spec {\"spec-id\":0,\"fields\":[{\"name\":\"dept_id\",\"transform\":\"identity\",\"source-id\":3,\"field-id\":1000}]} + format-version 2 + iceberg.orc.files.only true +#### A masked pattern was here #### + numFiles 11 + numRows 
11 + parquet.compression zstd +#### A masked pattern was here #### + rawDataSize 0 + serialization.format 1 + snapshot-count 15 + storage_handler org.apache.iceberg.mr.hive.HiveIcebergStorageHandler + table_type ICEBERG + totalSize #Masked# +#### A masked pattern was here #### + uuid #Masked# + write.delete.mode merge-on-read + write.format.default orc + write.merge.mode merge-on-read + write.update.mode merge-on-read + +# Storage Information +SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe +InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat +OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat +Compressed: No +Sort Columns: [] +PREHOOK: query: explain alter table ice_orc COMPACT 'major' and wait +PREHOOK: type: ALTERTABLE_COMPACT +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: explain alter table ice_orc COMPACT 'major' and wait +POSTHOOK: type: ALTERTABLE_COMPACT +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Compact + compaction type: major + table name: default.ice_orc + numberOfBuckets: 0 + table name: default.ice_orc + blocking: true + +PREHOOK: query: alter table ice_orc COMPACT 'major' and wait +PREHOOK: type: ALTERTABLE_COMPACT +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: alter table ice_orc COMPACT 'major' and wait +POSTHOOK: type: ALTERTABLE_COMPACT +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: select * from ice_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from ice_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +fn3 ln3a 1 +fn4 ln4a 1 +fn5 ln5a 2 +fn6 ln6a 2 +PREHOOK: query: describe formatted ice_orc +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ice_orc +POSTHOOK: query: describe formatted ice_orc +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ice_orc +# col_name data_type comment +first_name string +last_name string +dept_id bigint + +# Partition Transform Information +# col_name transform_type +dept_id IDENTITY + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: EXTERNAL_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"dept_id\":\"true\",\"first_name\":\"true\",\"last_name\":\"true\"}} + EXTERNAL TRUE + bucketing_version 2 + current-schema {\"type\":\"struct\",\"schema-id\":0,\"fields\":[{\"id\":1,\"name\":\"first_name\",\"required\":false,\"type\":\"string\"},{\"id\":2,\"name\":\"last_name\",\"required\":false,\"type\":\"string\"},{\"id\":3,\"name\":\"dept_id\",\"required\":false,\"type\":\"long\"}]} + current-snapshot-id #Masked# + current-snapshot-summary {\"replace-partitions\":\"true\",\"added-data-files\":\"2\",\"added-records\":\"4\",\"added-files-size\":\"#Masked#\",\"changed-partition-count\":\"2\",\"total-records\":\"4\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"2\",\"total-delete-files\":\"0\",\"total-position-deletes\":\"0\",\"total-equality-deletes\":\"0\"} + current-snapshot-timestamp-ms #Masked# + default-partition-spec {\"spec-id\":0,\"fields\":[{\"name\":\"dept_id\",\"transform\":\"identity\",\"source-id\":3,\"field-id\":1000}]} + format-version 2 + iceberg.orc.files.only true +#### A masked pattern was here #### 
+ numFiles 2 + numRows 4 + parquet.compression zstd +#### A masked pattern was here #### + rawDataSize 0 + serialization.format 1 + snapshot-count 17 + storage_handler org.apache.iceberg.mr.hive.HiveIcebergStorageHandler + table_type ICEBERG + totalSize #Masked# +#### A masked pattern was here #### + uuid #Masked# + write.delete.mode merge-on-read + write.format.default orc + write.merge.mode merge-on-read + write.update.mode merge-on-read + +# Storage Information +SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe +InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat +OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat +Compressed: No +Sort Columns: [] +PREHOOK: query: show compactions +PREHOOK: type: SHOW COMPACTIONS +POSTHOOK: query: show compactions +POSTHOOK: type: SHOW COMPACTIONS +CompactionId Database Table Partition Type State Worker host Worker Enqueue Time Start Time Duration(ms) HadoopJobId Error message Initiator host Initiator Pool name TxnId Next TxnId Commit Time Highest WriteId +#Masked# default ice_orc --- MAJOR succeeded #Masked# manual default 0 0 0 --- +PREHOOK: query: insert into ice_orc VALUES ('fn11','ln11', 1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn11','ln11', 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn12','ln12', 1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn12','ln12', 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn13','ln13', 1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn13','ln13', 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn14','ln14', 1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn14','ln14', 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn15','ln15', 2) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn15','ln15', 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn16','ln16', 2) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn16','ln16', 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn17','ln17', 2) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn17','ln17', 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn18','ln18', 2) +PREHOOK: type: QUERY +PREHOOK: 
Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn18','ln18', 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln11a' where first_name='fn11' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln11a' where first_name='fn11' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln12a' where first_name='fn12' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln12a' where first_name='fn12' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln13a' where first_name='fn13' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln13a' where first_name='fn13' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln14a' where first_name='fn14' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln14a' where first_name='fn14' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln15a' where first_name='fn15' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln15a' where first_name='fn15' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln16a' where first_name='fn16' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln16a' where first_name='fn16' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln17a' where first_name='fn17' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln17a' where first_name='fn17' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln18a' where first_name='fn18' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln18a' where first_name='fn18' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: delete 
from ice_orc where last_name in ('ln11a', 'ln12a', 'ln17a', 'ln18a') +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: delete from ice_orc where last_name in ('ln11a', 'ln12a', 'ln17a', 'ln18a') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +PREHOOK: query: select * from ice_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from ice_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +fn13 ln13a 1 +fn14 ln14a 1 +fn15 ln15a 2 +fn16 ln16a 2 +fn3 ln3a 1 +fn4 ln4a 1 +fn5 ln5a 2 +fn6 ln6a 2 +PREHOOK: query: describe formatted ice_orc +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ice_orc +POSTHOOK: query: describe formatted ice_orc +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ice_orc +# col_name data_type comment +first_name string +last_name string +dept_id bigint + +# Partition Transform Information +# col_name transform_type +dept_id IDENTITY + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: EXTERNAL_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + EXTERNAL TRUE + bucketing_version 2 + current-schema {\"type\":\"struct\",\"schema-id\":0,\"fields\":[{\"id\":1,\"name\":\"first_name\",\"required\":false,\"type\":\"string\"},{\"id\":2,\"name\":\"last_name\",\"required\":false,\"type\":\"string\"},{\"id\":3,\"name\":\"dept_id\",\"required\":false,\"type\":\"long\"}]} + current-snapshot-id #Masked# + current-snapshot-summary {\"deleted-data-files\":\"4\",\"deleted-records\":\"4\",\"removed-files-size\":\"1948\",\"changed-partition-count\":\"2\",\"total-records\":\"16\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"14\",\"total-delete-files\":\"8\",\"total-position-deletes\":\"8\",\"total-equality-deletes\":\"0\"} + current-snapshot-timestamp-ms #Masked# + default-partition-spec {\"spec-id\":0,\"fields\":[{\"name\":\"dept_id\",\"transform\":\"identity\",\"source-id\":3,\"field-id\":1000}]} + format-version 2 + iceberg.orc.files.only true +#### A masked pattern was here #### + numFiles 14 + numRows 16 + parquet.compression zstd +#### A masked pattern was here #### + rawDataSize 0 + serialization.format 1 + snapshot-count 34 + storage_handler org.apache.iceberg.mr.hive.HiveIcebergStorageHandler + table_type ICEBERG + totalSize #Masked# +#### A masked pattern was here #### + uuid #Masked# + write.delete.mode merge-on-read + write.format.default orc + write.merge.mode merge-on-read + write.update.mode merge-on-read + +# Storage Information +SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe +InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat +OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat +Compressed: No +Sort Columns: [] +PREHOOK: query: explain alter table ice_orc COMPACT 'major' and wait +PREHOOK: type: ALTERTABLE_COMPACT +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: explain alter table ice_orc COMPACT 'major' and wait +POSTHOOK: type: ALTERTABLE_COMPACT +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Compact + compaction type: major + table name: default.ice_orc + numberOfBuckets: 0 + table name: default.ice_orc + blocking: true + +PREHOOK: query: 
alter table ice_orc COMPACT 'major' and wait +PREHOOK: type: ALTERTABLE_COMPACT +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: alter table ice_orc COMPACT 'major' and wait +POSTHOOK: type: ALTERTABLE_COMPACT +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: select * from ice_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from ice_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +fn13 ln13a 1 +fn14 ln14a 1 +fn15 ln15a 2 +fn16 ln16a 2 +fn3 ln3a 1 +fn4 ln4a 1 +fn5 ln5a 2 +fn6 ln6a 2 +PREHOOK: query: describe formatted ice_orc +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ice_orc +POSTHOOK: query: describe formatted ice_orc +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ice_orc +# col_name data_type comment +first_name string +last_name string +dept_id bigint + +# Partition Transform Information +# col_name transform_type +dept_id IDENTITY + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: EXTERNAL_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"dept_id\":\"true\",\"first_name\":\"true\",\"last_name\":\"true\"}} + EXTERNAL TRUE + bucketing_version 2 + current-schema {\"type\":\"struct\",\"schema-id\":0,\"fields\":[{\"id\":1,\"name\":\"first_name\",\"required\":false,\"type\":\"string\"},{\"id\":2,\"name\":\"last_name\",\"required\":false,\"type\":\"string\"},{\"id\":3,\"name\":\"dept_id\",\"required\":false,\"type\":\"long\"}]} + current-snapshot-id #Masked# + current-snapshot-summary {\"replace-partitions\":\"true\",\"added-data-files\":\"2\",\"added-records\":\"8\",\"added-files-size\":\"#Masked#\",\"changed-partition-count\":\"2\",\"total-records\":\"8\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"2\",\"total-delete-files\":\"0\",\"total-position-deletes\":\"0\",\"total-equality-deletes\":\"0\"} + current-snapshot-timestamp-ms #Masked# + default-partition-spec {\"spec-id\":0,\"fields\":[{\"name\":\"dept_id\",\"transform\":\"identity\",\"source-id\":3,\"field-id\":1000}]} + format-version 2 + iceberg.orc.files.only true +#### A masked pattern was here #### + numFiles 2 + numRows 8 + parquet.compression zstd +#### A masked pattern was here #### + rawDataSize 0 + serialization.format 1 + snapshot-count 36 + storage_handler org.apache.iceberg.mr.hive.HiveIcebergStorageHandler + table_type ICEBERG + totalSize #Masked# +#### A masked pattern was here #### + uuid #Masked# + write.delete.mode merge-on-read + write.format.default orc + write.merge.mode merge-on-read + write.update.mode merge-on-read + +# Storage Information +SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe +InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat +OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat +Compressed: No +Sort Columns: [] +PREHOOK: query: show compactions +PREHOOK: type: SHOW COMPACTIONS +POSTHOOK: query: show compactions +POSTHOOK: type: SHOW COMPACTIONS +CompactionId Database Table Partition Type State Worker host Worker Enqueue Time Start Time Duration(ms) HadoopJobId Error message Initiator host Initiator Pool name TxnId Next TxnId Commit Time Highest WriteId +#Masked# default ice_orc --- MAJOR succeeded #Masked# manual default 0 0 0 --- +#Masked# default ice_orc --- MAJOR succeeded #Masked# manual default 0 0 
0 --- diff --git a/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_major_compaction_schema_evolution.q.out b/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_major_compaction_schema_evolution.q.out new file mode 100644 index 000000000000..f2f453b34d09 --- /dev/null +++ b/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_major_compaction_schema_evolution.q.out @@ -0,0 +1,362 @@ +PREHOOK: query: create table ice_orc ( + first_name string, + last_name string + ) +partitioned by (dept_id bigint) +stored by iceberg stored as orc +tblproperties ('format-version'='2') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ice_orc +POSTHOOK: query: create table ice_orc ( + first_name string, + last_name string + ) +partitioned by (dept_id bigint) +stored by iceberg stored as orc +tblproperties ('format-version'='2') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc (first_name, last_name, dept_id) VALUES ('fn1','ln1', 1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc (first_name, last_name, dept_id) VALUES ('fn1','ln1', 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc (first_name, last_name, dept_id) VALUES ('fn2','ln2', 1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc (first_name, last_name, dept_id) VALUES ('fn2','ln2', 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc (first_name, last_name, dept_id) VALUES ('fn3','ln3', 1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc (first_name, last_name, dept_id) VALUES ('fn3','ln3', 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc (first_name, last_name, dept_id) VALUES ('fn4','ln4', 1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc (first_name, last_name, dept_id) VALUES ('fn4','ln4', 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: alter table ice_orc add columns (address string) +PREHOOK: type: ALTERTABLE_ADDCOLS +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: alter table ice_orc add columns (address string) +POSTHOOK: type: ALTERTABLE_ADDCOLS +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc (first_name, last_name, address, dept_id) VALUES ('fn5','ln5', 'addr_5', 2) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc (first_name, last_name, address, dept_id) VALUES ('fn5','ln5', 'addr_5', 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc (first_name, last_name, address, dept_id) VALUES ('fn6','ln6', 'addr_6', 2) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: 
default@ice_orc +POSTHOOK: query: insert into ice_orc (first_name, last_name, address, dept_id) VALUES ('fn6','ln6', 'addr_6', 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc (first_name, last_name, address, dept_id) VALUES ('fn7','ln7', 'addr_7', 2) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc (first_name, last_name, address, dept_id) VALUES ('fn7','ln7', 'addr_7', 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc (first_name, last_name, address, dept_id) VALUES ('fn8','ln8', 'addr_8', 2) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc (first_name, last_name, address, dept_id) VALUES ('fn8','ln8', 'addr_8', 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln1a' where first_name='fn1' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln1a' where first_name='fn1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln2a' where first_name='fn2' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln2a' where first_name='fn2' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln3a' where first_name='fn3' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln3a' where first_name='fn3' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln4a' where first_name='fn4' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln4a' where first_name='fn4' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: alter table ice_orc change first_name fname string +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: alter table ice_orc change first_name fname string +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln5a' where fname='fn5' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln5a' where fname='fn5' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln6a' where fname='fn6' 
+PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln6a' where fname='fn6' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln7a' where fname='fn7' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln7a' where fname='fn7' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln8a' where fname='fn8' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln8a' where fname='fn8' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: delete from ice_orc where fname in ('fn1', 'fn3', 'fn7') +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: delete from ice_orc where fname in ('fn1', 'fn3', 'fn7') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +PREHOOK: query: select * from ice_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from ice_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +fn2 ln2a 1 NULL +fn4 ln4a 1 NULL +fn5 ln5a 2 addr_5 +fn6 ln6a 2 addr_6 +fn8 ln8a 2 addr_8 +PREHOOK: query: describe formatted ice_orc +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ice_orc +POSTHOOK: query: describe formatted ice_orc +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ice_orc +# col_name data_type comment +fname string +last_name string +dept_id bigint +address string + +# Partition Transform Information +# col_name transform_type +dept_id IDENTITY + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: EXTERNAL_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + EXTERNAL TRUE + bucketing_version 2 + current-schema {\"type\":\"struct\",\"schema-id\":2,\"fields\":[{\"id\":1,\"name\":\"fname\",\"required\":false,\"type\":\"string\"},{\"id\":2,\"name\":\"last_name\",\"required\":false,\"type\":\"string\"},{\"id\":3,\"name\":\"dept_id\",\"required\":false,\"type\":\"long\"},{\"id\":4,\"name\":\"address\",\"required\":false,\"type\":\"string\"}]} + current-snapshot-id #Masked# + current-snapshot-summary {\"deleted-data-files\":\"6\",\"deleted-records\":\"6\",\"removed-files-size\":\"3167\",\"changed-partition-count\":\"2\",\"total-records\":\"10\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"10\",\"total-delete-files\":\"8\",\"total-position-deletes\":\"8\",\"total-equality-deletes\":\"0\"} + current-snapshot-timestamp-ms #Masked# + default-partition-spec {\"spec-id\":0,\"fields\":[{\"name\":\"dept_id\",\"transform\":\"identity\",\"source-id\":3,\"field-id\":1000}]} + format-version 2 + iceberg.orc.files.only true +#### A masked pattern was here #### + numFiles 10 + numRows 10 + parquet.compression zstd +#### A masked pattern was here #### + rawDataSize 0 
+ serialization.format 1 + snapshot-count 17 + storage_handler org.apache.iceberg.mr.hive.HiveIcebergStorageHandler + table_type ICEBERG + totalSize #Masked# +#### A masked pattern was here #### + uuid #Masked# + write.delete.mode merge-on-read + write.format.default orc + write.merge.mode merge-on-read + write.update.mode merge-on-read + +# Storage Information +SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe +InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat +OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat +Compressed: No +Sort Columns: [] +PREHOOK: query: explain alter table ice_orc COMPACT 'major' and wait +PREHOOK: type: ALTERTABLE_COMPACT +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: explain alter table ice_orc COMPACT 'major' and wait +POSTHOOK: type: ALTERTABLE_COMPACT +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Compact + compaction type: major + table name: default.ice_orc + numberOfBuckets: 0 + table name: default.ice_orc + blocking: true + +PREHOOK: query: alter table ice_orc COMPACT 'major' and wait +PREHOOK: type: ALTERTABLE_COMPACT +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: alter table ice_orc COMPACT 'major' and wait +POSTHOOK: type: ALTERTABLE_COMPACT +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: select * from ice_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from ice_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +fn2 ln2a 1 NULL +fn4 ln4a 1 NULL +fn5 ln5a 2 addr_5 +fn6 ln6a 2 addr_6 +fn8 ln8a 2 addr_8 +PREHOOK: query: describe formatted ice_orc +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ice_orc +POSTHOOK: query: describe formatted ice_orc +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ice_orc +# col_name data_type comment +fname string +last_name string +dept_id bigint +address string + +# Partition Transform Information +# col_name transform_type +dept_id IDENTITY + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: EXTERNAL_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"address\":\"true\",\"dept_id\":\"true\",\"fname\":\"true\",\"last_name\":\"true\"}} + EXTERNAL TRUE + bucketing_version 2 + current-schema {\"type\":\"struct\",\"schema-id\":2,\"fields\":[{\"id\":1,\"name\":\"fname\",\"required\":false,\"type\":\"string\"},{\"id\":2,\"name\":\"last_name\",\"required\":false,\"type\":\"string\"},{\"id\":3,\"name\":\"dept_id\",\"required\":false,\"type\":\"long\"},{\"id\":4,\"name\":\"address\",\"required\":false,\"type\":\"string\"}]} + current-snapshot-id #Masked# + current-snapshot-summary {\"replace-partitions\":\"true\",\"added-data-files\":\"2\",\"added-records\":\"5\",\"added-files-size\":\"#Masked#\",\"changed-partition-count\":\"2\",\"total-records\":\"5\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"2\",\"total-delete-files\":\"0\",\"total-position-deletes\":\"0\",\"total-equality-deletes\":\"0\"} + current-snapshot-timestamp-ms #Masked# + default-partition-spec {\"spec-id\":0,\"fields\":[{\"name\":\"dept_id\",\"transform\":\"identity\",\"source-id\":3,\"field-id\":1000}]} + format-version 2 + 
iceberg.orc.files.only true +#### A masked pattern was here #### + numFiles 2 + numRows 5 + parquet.compression zstd +#### A masked pattern was here #### + rawDataSize 0 + serialization.format 1 + snapshot-count 19 + storage_handler org.apache.iceberg.mr.hive.HiveIcebergStorageHandler + table_type ICEBERG + totalSize #Masked# +#### A masked pattern was here #### + uuid #Masked# + write.delete.mode merge-on-read + write.format.default orc + write.merge.mode merge-on-read + write.update.mode merge-on-read + +# Storage Information +SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe +InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat +OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat +Compressed: No +Sort Columns: [] +PREHOOK: query: show compactions +PREHOOK: type: SHOW COMPACTIONS +POSTHOOK: query: show compactions +POSTHOOK: type: SHOW COMPACTIONS +CompactionId Database Table Partition Type State Worker host Worker Enqueue Time Start Time Duration(ms) HadoopJobId Error message Initiator host Initiator Pool name TxnId Next TxnId Commit Time Highest WriteId +#Masked# default ice_orc --- MAJOR succeeded #Masked# manual default 0 0 0 --- diff --git a/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_major_compaction_unpartitioned.q.out b/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_major_compaction_unpartitioned.q.out new file mode 100644 index 000000000000..a061d210dbe5 --- /dev/null +++ b/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_major_compaction_unpartitioned.q.out @@ -0,0 +1,310 @@ +PREHOOK: query: create table ice_orc ( + first_name string, + last_name string + ) +stored by iceberg stored as orc +tblproperties ('format-version'='2') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ice_orc +POSTHOOK: query: create table ice_orc ( + first_name string, + last_name string + ) +stored by iceberg stored as orc +tblproperties ('format-version'='2') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn1','ln1') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn1','ln1') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn2','ln2') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn2','ln2') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn3','ln3') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn3','ln3') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn4','ln4') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn4','ln4') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn5','ln5') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: 
default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn5','ln5') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn6','ln6') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn6','ln6') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn7','ln7') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn7','ln7') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln1a' where first_name='fn1' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln1a' where first_name='fn1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln2a' where first_name='fn2' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln2a' where first_name='fn2' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln3a' where first_name='fn3' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln3a' where first_name='fn3' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln4a' where first_name='fn4' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln4a' where first_name='fn4' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln5a' where first_name='fn5' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln5a' where first_name='fn5' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln6a' where first_name='fn6' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln6a' where first_name='fn6' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln7a' where first_name='fn7' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln7a' where first_name='fn7' 
+POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: delete from ice_orc where last_name in ('ln5a', 'ln6a', 'ln7a') +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: delete from ice_orc where last_name in ('ln5a', 'ln6a', 'ln7a') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +PREHOOK: query: select * from ice_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from ice_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +fn1 ln1a +fn2 ln2a +fn3 ln3a +fn4 ln4a +PREHOOK: query: describe formatted ice_orc +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ice_orc +POSTHOOK: query: describe formatted ice_orc +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ice_orc +# col_name data_type comment +first_name string +last_name string + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: EXTERNAL_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + EXTERNAL TRUE + bucketing_version 2 + current-schema {\"type\":\"struct\",\"schema-id\":0,\"fields\":[{\"id\":1,\"name\":\"first_name\",\"required\":false,\"type\":\"string\"},{\"id\":2,\"name\":\"last_name\",\"required\":false,\"type\":\"string\"}]} + current-snapshot-id #Masked# + current-snapshot-summary {\"deleted-data-files\":\"3\",\"deleted-records\":\"3\",\"removed-files-size\":\"1131\",\"changed-partition-count\":\"1\",\"total-records\":\"11\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"11\",\"total-delete-files\":\"7\",\"total-position-deletes\":\"7\",\"total-equality-deletes\":\"0\"} + current-snapshot-timestamp-ms #Masked# + format-version 2 + iceberg.orc.files.only true +#### A masked pattern was here #### + numFiles 11 + numRows 11 + parquet.compression zstd +#### A masked pattern was here #### + rawDataSize 0 + serialization.format 1 + snapshot-count 15 + storage_handler org.apache.iceberg.mr.hive.HiveIcebergStorageHandler + table_type ICEBERG + totalSize #Masked# +#### A masked pattern was here #### + uuid #Masked# + write.delete.mode merge-on-read + write.format.default orc + write.merge.mode merge-on-read + write.update.mode merge-on-read + +# Storage Information +SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe +InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat +OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat +Compressed: No +Sort Columns: [] +PREHOOK: query: explain alter table ice_orc COMPACT 'major' and wait +PREHOOK: type: ALTERTABLE_COMPACT +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: explain alter table ice_orc COMPACT 'major' and wait +POSTHOOK: type: ALTERTABLE_COMPACT +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Compact + compaction type: major + table name: default.ice_orc + numberOfBuckets: 0 + table name: default.ice_orc + blocking: true + +PREHOOK: query: alter table ice_orc COMPACT 'major' and wait +PREHOOK: type: ALTERTABLE_COMPACT +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: alter table ice_orc COMPACT 'major' and wait +POSTHOOK: type: 
ALTERTABLE_COMPACT +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: select * from ice_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from ice_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +fn1 ln1a +fn2 ln2a +fn3 ln3a +fn4 ln4a +PREHOOK: query: describe formatted ice_orc +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ice_orc +POSTHOOK: query: describe formatted ice_orc +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ice_orc +# col_name data_type comment +first_name string +last_name string + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: EXTERNAL_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"first_name\":\"true\",\"last_name\":\"true\"}} + EXTERNAL TRUE + bucketing_version 2 + current-schema {\"type\":\"struct\",\"schema-id\":0,\"fields\":[{\"id\":1,\"name\":\"first_name\",\"required\":false,\"type\":\"string\"},{\"id\":2,\"name\":\"last_name\",\"required\":false,\"type\":\"string\"}]} + current-snapshot-id #Masked# + current-snapshot-summary {\"replace-partitions\":\"true\",\"added-data-files\":\"1\",\"added-records\":\"4\",\"added-files-size\":\"#Masked#\",\"changed-partition-count\":\"1\",\"total-records\":\"4\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"1\",\"total-delete-files\":\"0\",\"total-position-deletes\":\"0\",\"total-equality-deletes\":\"0\"} + current-snapshot-timestamp-ms #Masked# + format-version 2 + iceberg.orc.files.only true +#### A masked pattern was here #### + numFiles 1 + numRows 4 + parquet.compression zstd +#### A masked pattern was here #### + rawDataSize 0 + serialization.format 1 + snapshot-count 17 + storage_handler org.apache.iceberg.mr.hive.HiveIcebergStorageHandler + table_type ICEBERG + totalSize #Masked# +#### A masked pattern was here #### + uuid #Masked# + write.delete.mode merge-on-read + write.format.default orc + write.merge.mode merge-on-read + write.update.mode merge-on-read + +# Storage Information +SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe +InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat +OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat +Compressed: No +Sort Columns: [] +PREHOOK: query: show compactions +PREHOOK: type: SHOW COMPACTIONS +POSTHOOK: query: show compactions +POSTHOOK: type: SHOW COMPACTIONS +CompactionId Database Table Partition Type State Worker host Worker Enqueue Time Start Time Duration(ms) HadoopJobId Error message Initiator host Initiator Pool name TxnId Next TxnId Commit Time Highest WriteId +#Masked# default ice_orc --- MAJOR succeeded #Masked# manual default 0 0 0 --- diff --git a/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_optimize_table_unpartitioned.q.out b/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_optimize_table_unpartitioned.q.out new file mode 100644 index 000000000000..a4ea671dd05d --- /dev/null +++ b/iceberg/iceberg-handler/src/test/results/positive/llap/iceberg_optimize_table_unpartitioned.q.out @@ -0,0 +1,310 @@ +PREHOOK: query: create table ice_orc ( + first_name string, + last_name string + ) +stored by iceberg stored as orc +tblproperties ('format-version'='2') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ice_orc +POSTHOOK: query: create table 
ice_orc ( + first_name string, + last_name string + ) +stored by iceberg stored as orc +tblproperties ('format-version'='2') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn1','ln1') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn1','ln1') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn2','ln2') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn2','ln2') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn3','ln3') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn3','ln3') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn4','ln4') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn4','ln4') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn5','ln5') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn5','ln5') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn6','ln6') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn6','ln6') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: insert into ice_orc VALUES ('fn7','ln7') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@ice_orc +POSTHOOK: query: insert into ice_orc VALUES ('fn7','ln7') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln1a' where first_name='fn1' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln1a' where first_name='fn1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln2a' where first_name='fn2' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln2a' where first_name='fn2' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln3a' where first_name='fn3' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name 
= 'ln3a' where first_name='fn3' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln4a' where first_name='fn4' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln4a' where first_name='fn4' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln5a' where first_name='fn5' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln5a' where first_name='fn5' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln6a' where first_name='fn6' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln6a' where first_name='fn6' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: update ice_orc set last_name = 'ln7a' where first_name='fn7' +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: update ice_orc set last_name = 'ln7a' where first_name='fn7' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: delete from ice_orc where last_name in ('ln5a', 'ln6a', 'ln7a') +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: delete from ice_orc where last_name in ('ln5a', 'ln6a', 'ln7a') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +PREHOOK: query: select * from ice_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from ice_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +fn1 ln1a +fn2 ln2a +fn3 ln3a +fn4 ln4a +PREHOOK: query: describe formatted ice_orc +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ice_orc +POSTHOOK: query: describe formatted ice_orc +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ice_orc +# col_name data_type comment +first_name string +last_name string + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: EXTERNAL_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + EXTERNAL TRUE + bucketing_version 2 + current-schema {\"type\":\"struct\",\"schema-id\":0,\"fields\":[{\"id\":1,\"name\":\"first_name\",\"required\":false,\"type\":\"string\"},{\"id\":2,\"name\":\"last_name\",\"required\":false,\"type\":\"string\"}]} + current-snapshot-id #Masked# + current-snapshot-summary 
{\"deleted-data-files\":\"3\",\"deleted-records\":\"3\",\"removed-files-size\":\"1131\",\"changed-partition-count\":\"1\",\"total-records\":\"11\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"11\",\"total-delete-files\":\"7\",\"total-position-deletes\":\"7\",\"total-equality-deletes\":\"0\"} + current-snapshot-timestamp-ms #Masked# + format-version 2 + iceberg.orc.files.only true +#### A masked pattern was here #### + numFiles 11 + numRows 11 + parquet.compression zstd +#### A masked pattern was here #### + rawDataSize 0 + serialization.format 1 + snapshot-count 15 + storage_handler org.apache.iceberg.mr.hive.HiveIcebergStorageHandler + table_type ICEBERG + totalSize #Masked# +#### A masked pattern was here #### + uuid #Masked# + write.delete.mode merge-on-read + write.format.default orc + write.merge.mode merge-on-read + write.update.mode merge-on-read + +# Storage Information +SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe +InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat +OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat +Compressed: No +Sort Columns: [] +PREHOOK: query: explain optimize table ice_orc rewrite data +PREHOOK: type: ALTERTABLE_COMPACT +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: explain optimize table ice_orc rewrite data +POSTHOOK: type: ALTERTABLE_COMPACT +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Compact + compaction type: major + table name: default.ice_orc + numberOfBuckets: 0 + table name: default.ice_orc + blocking: true + +PREHOOK: query: optimize table ice_orc rewrite data +PREHOOK: type: ALTERTABLE_COMPACT +PREHOOK: Input: default@ice_orc +PREHOOK: Output: default@ice_orc +POSTHOOK: query: optimize table ice_orc rewrite data +POSTHOOK: type: ALTERTABLE_COMPACT +POSTHOOK: Input: default@ice_orc +POSTHOOK: Output: default@ice_orc +PREHOOK: query: select * from ice_orc +PREHOOK: type: QUERY +PREHOOK: Input: default@ice_orc +#### A masked pattern was here #### +POSTHOOK: query: select * from ice_orc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ice_orc +#### A masked pattern was here #### +fn1 ln1a +fn2 ln2a +fn3 ln3a +fn4 ln4a +PREHOOK: query: describe formatted ice_orc +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@ice_orc +POSTHOOK: query: describe formatted ice_orc +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@ice_orc +# col_name data_type comment +first_name string +last_name string + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: EXTERNAL_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"first_name\":\"true\",\"last_name\":\"true\"}} + EXTERNAL TRUE + bucketing_version 2 + current-schema {\"type\":\"struct\",\"schema-id\":0,\"fields\":[{\"id\":1,\"name\":\"first_name\",\"required\":false,\"type\":\"string\"},{\"id\":2,\"name\":\"last_name\",\"required\":false,\"type\":\"string\"}]} + current-snapshot-id #Masked# + current-snapshot-summary {\"replace-partitions\":\"true\",\"added-data-files\":\"1\",\"added-records\":\"4\",\"added-files-size\":\"#Masked#\",\"changed-partition-count\":\"1\",\"total-records\":\"4\",\"total-files-size\":\"#Masked#\",\"total-data-files\":\"1\",\"total-delete-files\":\"0\",\"total-position-deletes\":\"0\",\"total-equality-deletes\":\"0\"} + current-snapshot-timestamp-ms #Masked# 
+ format-version 2 + iceberg.orc.files.only true +#### A masked pattern was here #### + numFiles 1 + numRows 4 + parquet.compression zstd +#### A masked pattern was here #### + rawDataSize 0 + serialization.format 1 + snapshot-count 17 + storage_handler org.apache.iceberg.mr.hive.HiveIcebergStorageHandler + table_type ICEBERG + totalSize #Masked# +#### A masked pattern was here #### + uuid #Masked# + write.delete.mode merge-on-read + write.format.default orc + write.merge.mode merge-on-read + write.update.mode merge-on-read + +# Storage Information +SerDe Library: org.apache.iceberg.mr.hive.HiveIcebergSerDe +InputFormat: org.apache.iceberg.mr.hive.HiveIcebergInputFormat +OutputFormat: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat +Compressed: No +Sort Columns: [] +PREHOOK: query: show compactions +PREHOOK: type: SHOW COMPACTIONS +POSTHOOK: query: show compactions +POSTHOOK: type: SHOW COMPACTIONS +CompactionId Database Table Partition Type State Worker host Worker Enqueue Time Start Time Duration(ms) HadoopJobId Error message Initiator host Initiator Pool name TxnId Next TxnId Commit Time Highest WriteId +#Masked# default ice_orc --- MAJOR succeeded #Masked# manual default 0 0 0 --- diff --git a/iceberg/iceberg-handler/src/test/results/positive/merge_iceberg_copy_on_write_partitioned.q.out b/iceberg/iceberg-handler/src/test/results/positive/merge_iceberg_copy_on_write_partitioned.q.out index 1227db6101d0..f22d0bb16200 100644 --- a/iceberg/iceberg-handler/src/test/results/positive/merge_iceberg_copy_on_write_partitioned.q.out +++ b/iceberg/iceberg-handler/src/test/results/positive/merge_iceberg_copy_on_write_partitioned.q.out @@ -200,7 +200,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 8 Data size: 2412 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (_col4 IS DISTINCT FROM _col7) (type: boolean) + predicate: ((_col4 <> _col7) or _col4 is null or _col7 is null) (type: boolean) Statistics: Num rows: 8 Data size: 2412 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) @@ -228,18 +228,18 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 302 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 7 Data size: 2096 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Reducer 4 Execution mode: vectorized Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY._col5 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, 
_col6, iceberg_bucket(_col4, 16), _col5 + expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY.iceberg_truncate(_col5, 3) (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), iceberg_truncate(_col5, 3) File Output Operator compressed: false Dp Sort State: PARTITION_SORTED @@ -322,10 +322,10 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 4 Data size: 1196 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 7 Data size: 2096 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Reducer 8 @@ -379,10 +379,10 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 2 Data size: 598 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 7 Data size: 2096 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Union 3 @@ -511,18 +511,18 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 9 Data size: 900 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 9 Data size: 900 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Reducer 3 Execution mode: vectorized Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY._col5 (type: string) - outputColumnNames: _col0, _col1, 
_col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), _col5 + expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY.iceberg_truncate(_col5, 3) (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), iceberg_truncate(_col5, 3) File Output Operator compressed: false Dp Sort State: PARTITION_SORTED diff --git a/iceberg/iceberg-handler/src/test/results/positive/merge_iceberg_copy_on_write_unpartitioned.q.out b/iceberg/iceberg-handler/src/test/results/positive/merge_iceberg_copy_on_write_unpartitioned.q.out index 6728fbf39bf7..14a9fd4c52b6 100644 --- a/iceberg/iceberg-handler/src/test/results/positive/merge_iceberg_copy_on_write_unpartitioned.q.out +++ b/iceberg/iceberg-handler/src/test/results/positive/merge_iceberg_copy_on_write_unpartitioned.q.out @@ -45,6 +45,357 @@ POSTHOOK: Output: default@source POSTHOOK: Lineage: source.a SCRIPT [] POSTHOOK: Lineage: source.b SCRIPT [] POSTHOOK: Lineage: source.c SCRIPT [] +PREHOOK: query: explain +merge into target_ice as t using source src ON t.a = src.a +when matched and t.a > 100 THEN DELETE +when not matched then insert values (src.a, src.b, src.c) +PREHOOK: type: QUERY +PREHOOK: Input: default@source +PREHOOK: Input: default@target_ice +PREHOOK: Output: default@target_ice +POSTHOOK: query: explain +merge into target_ice as t using source src ON t.a = src.a +when matched and t.a > 100 THEN DELETE +when not matched then insert values (src.a, src.b, src.c) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@source +POSTHOOK: Input: default@target_ice +POSTHOOK: Output: default@target_ice +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 10 (SIMPLE_EDGE), Union 3 (CONTAINS) + Reducer 4 <- Map 1 (SIMPLE_EDGE), Map 10 (SIMPLE_EDGE) + Reducer 5 <- Reducer 4 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE), Union 3 (CONTAINS) + Reducer 6 <- Map 1 (SIMPLE_EDGE), Map 10 (SIMPLE_EDGE) + Reducer 7 <- Reducer 6 (SIMPLE_EDGE), Union 3 (CONTAINS) + Reducer 8 <- Map 1 (SIMPLE_EDGE), Map 10 (SIMPLE_EDGE) + Reducer 9 <- Reducer 8 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: target_ice + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: bigint), a (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 4 Data size: 832 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col4 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col4 (type: int) + Statistics: Num rows: 4 Data size: 832 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint) + Filter Operator + predicate: FILE__PATH is not null (type: boolean) + Statistics: Num rows: 4 Data size: 380 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: PARTITION__SPEC__ID (type: int), 
PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: bigint), a (type: int), b (type: string), c (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 4 Data size: 1196 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col4 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col4 (type: int) + Statistics: Num rows: 4 Data size: 1196 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col5 (type: string), _col6 (type: int) + Filter Operator + predicate: (a > 100) (type: boolean) + Statistics: Num rows: 3 Data size: 285 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string), a (type: int), b (type: string), c (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 3 Data size: 873 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col3 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col3 (type: int) + Statistics: Num rows: 3 Data size: 873 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col4 (type: string), _col5 (type: int) + Filter Operator + predicate: ((a > 100) and FILE__PATH is not null) (type: boolean) + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: FILE__PATH (type: string), a (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col1 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col1 (type: int) + Statistics: Num rows: 3 Data size: 564 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: string) + Execution mode: vectorized + Map 10 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 6 Data size: 576 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), b (type: string), c (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 6 Data size: 576 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 6 Data size: 576 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: string), _col2 (type: int) + Filter Operator + predicate: (a > 100) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE 
Column stats: COMPLETE + Filter Operator + predicate: a is not null (type: boolean) + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized + Reducer 2 + Reduce Operator Tree: + Merge Join Operator + condition map: + Full Outer Join 0 to 1 + keys: + 0 _col4 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 14 Data size: 1520 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: _col4 is null (type: boolean) + Statistics: Num rows: 10 Data size: 1216 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col5 (type: int), _col6 (type: string), _col7 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 10 Data size: 1200 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 13 Data size: 2097 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.iceberg.mr.hive.HiveIcebergInputFormat + output format: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat + serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe + name: default.target_ice + Reducer 4 + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Outer Join 0 to 1 + keys: + 0 _col4 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 8 Data size: 2412 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: ((((_col4 <> _col7) or (_col4 <= 100)) and _col4 is not null) or (((_col4 = _col7) and (_col4 > 100)) or _col4 is null) is null) (type: boolean) + Statistics: Num rows: 8 Data size: 2412 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 8 Data size: 2392 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col2 (type: string) + null sort order: z + sort order: + + Map-reduce partition columns: _col2 (type: string) + Statistics: Num rows: 8 Data size: 2392 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: bigint), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) + Reducer 5 + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Semi Join 0 to 1 + keys: + 0 _col2 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 2 Data size: 598 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 13 Data size: 2097 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: 
org.apache.iceberg.mr.hive.HiveIcebergInputFormat + output format: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat + serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe + name: default.target_ice + Reducer 6 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col3 (type: int) + outputColumnNames: _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 1 Data size: 291 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col3 (type: string) + null sort order: a + sort order: + + Map-reduce partition columns: _col3 (type: string) + Statistics: Num rows: 1 Data size: 291 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: int), _col2 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) + Reducer 7 + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: VALUE._col1 (type: int), VALUE._col2 (type: bigint), KEY.reducesinkkey0 (type: string), VALUE._col3 (type: int), VALUE._col4 (type: string), VALUE._col5 (type: int) + outputColumnNames: _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 1 Data size: 291 Basic stats: COMPLETE Column stats: COMPLETE + PTF Operator + Function definitions: + Input definition + input alias: ptf_0 + type: WINDOWING + Windowing table definition + input alias: ptf_1 + name: windowingtablefunction + order by: _col3 ASC NULLS FIRST + partition by: _col3 + raw input shape: + window functions: + window function definition + alias: row_number_window_0 + name: row_number + window function: GenericUDAFRowNumberEvaluator + window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) + isPivotResult: true + Statistics: Num rows: 1 Data size: 291 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (row_number_window_0 = 1) (type: boolean) + Statistics: Num rows: 1 Data size: 291 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col1 (type: int), _col2 (type: bigint), _col3 (type: string), -1L (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 1 Data size: 299 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 13 Data size: 2097 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.iceberg.mr.hive.HiveIcebergInputFormat + output format: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat + serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe + name: default.target_ice + Reducer 8 + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col1 (type: int) + outputColumnNames: _col1 + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col1 (type: string) + null sort order: a + sort order: + + Map-reduce partition columns: _col1 (type: string) + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Reducer 9 + Execution mode: vectorized + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string) + outputColumnNames: _col1 + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + PTF Operator + Function definitions: + Input definition + input alias: ptf_0 + output shape: _col1: string + type: WINDOWING + Windowing 
table definition + input alias: ptf_1 + name: windowingtablefunction + order by: _col1 ASC NULLS FIRST + partition by: _col1 + raw input shape: + window functions: + window function definition + alias: row_number_window_0 + name: row_number + window function: GenericUDAFRowNumberEvaluator + window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) + isPivotResult: true + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (row_number_window_0 = 1) (type: boolean) + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col1 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + keys: _col0 (type: string) + minReductionHashAggr: 0.4 + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE + Union 3 + Vertex: Union 3 + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.iceberg.mr.hive.HiveIcebergInputFormat + output format: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat + serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe + name: default.target_ice + + Stage: Stage-3 + Stats Work + Basic Stats Work: + PREHOOK: query: explain merge into target_ice as t using source src ON t.a = src.a when matched and t.a > 100 THEN DELETE @@ -74,13 +425,13 @@ STAGE PLANS: Tez #### A masked pattern was here #### Edges: - Reducer 12 <- Map 11 (SIMPLE_EDGE), Map 13 (SIMPLE_EDGE), Union 3 (CONTAINS) - Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 10 (SIMPLE_EDGE), Union 3 (CONTAINS) - Reducer 4 <- Map 1 (SIMPLE_EDGE), Map 11 (SIMPLE_EDGE) + Reducer 11 <- Map 10 (SIMPLE_EDGE), Map 12 (SIMPLE_EDGE), Union 3 (CONTAINS) + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 13 (SIMPLE_EDGE), Union 3 (CONTAINS) + Reducer 4 <- Map 1 (SIMPLE_EDGE), Map 13 (SIMPLE_EDGE) Reducer 5 <- Reducer 4 (SIMPLE_EDGE), Reducer 7 (SIMPLE_EDGE), Union 3 (CONTAINS) - Reducer 6 <- Map 1 (SIMPLE_EDGE), Map 11 (SIMPLE_EDGE) + Reducer 6 <- Map 1 (SIMPLE_EDGE), Map 13 (SIMPLE_EDGE) Reducer 7 <- Reducer 6 (SIMPLE_EDGE) - Reducer 8 <- Map 1 (SIMPLE_EDGE), Map 11 (SIMPLE_EDGE) + Reducer 8 <- Map 1 (SIMPLE_EDGE), Map 13 (SIMPLE_EDGE) Reducer 9 <- Reducer 8 (SIMPLE_EDGE), Union 3 (CONTAINS) #### A masked pattern was here #### Vertices: @@ -133,8 +484,55 @@ STAGE PLANS: Map Operator Tree: TableScan alias: target_ice - filterExpr: (a <= 100) (type: boolean) - Statistics: Num rows: 4 Data size: 32 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: bigint), a (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 4 Data size: 832 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col4 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col4 (type: int) + Statistics: Num rows: 4 Data size: 832 Basic stats: COMPLETE Column stats: COMPLETE + value 
expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint) + Execution mode: vectorized + Map 12 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 6 Data size: 576 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), b (type: string), c (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 6 Data size: 576 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 6 Data size: 576 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: string), _col2 (type: int) + Execution mode: vectorized + Map 13 + Map Operator Tree: + TableScan + alias: target_ice + Statistics: Num rows: 4 Data size: 380 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: FILE__PATH is not null (type: boolean) + Statistics: Num rows: 4 Data size: 380 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: bigint), a (type: int), b (type: string), c (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 4 Data size: 1196 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col4 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col4 (type: int) + Statistics: Num rows: 4 Data size: 1196 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col5 (type: string), _col6 (type: int) Filter Operator predicate: (a <= 100) (type: boolean) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE @@ -149,23 +547,6 @@ STAGE PLANS: Map-reduce partition columns: _col4 (type: int) Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col5 (type: int) - Execution mode: vectorized - Map 11 - Map Operator Tree: - TableScan - alias: target_ice - Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: bigint), a (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 4 Data size: 832 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col4 (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: _col4 (type: int) - Statistics: Num rows: 4 Data size: 832 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint) Filter Operator predicate: (a is not null and FILE__PATH is not null) (type: boolean) Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE @@ -180,20 +561,6 @@ STAGE PLANS: Map-reduce partition columns: _col1 (type: int) Statistics: Num rows: 4 Data size: 752 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: string) - Filter Operator - predicate: (FILE__PATH is not null and a is not null) (type: 
boolean) - Statistics: Num rows: 4 Data size: 380 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: bigint), a (type: int), b (type: string), c (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 4 Data size: 1196 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col4 (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: _col4 (type: int) - Statistics: Num rows: 4 Data size: 1196 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col5 (type: string), _col6 (type: int) Filter Operator predicate: a is not null (type: boolean) Statistics: Num rows: 4 Data size: 380 Basic stats: COMPLETE Column stats: COMPLETE @@ -209,24 +576,7 @@ STAGE PLANS: Statistics: Num rows: 4 Data size: 1164 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col4 (type: string), _col5 (type: int) Execution mode: vectorized - Map 13 - Map Operator Tree: - TableScan - alias: src - Statistics: Num rows: 6 Data size: 576 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: a (type: int), b (type: string), c (type: int) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 576 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 6 Data size: 576 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string), _col2 (type: int) - Execution mode: vectorized - Reducer 12 + Reducer 11 Reduce Operator Tree: Merge Join Operator condition map: @@ -284,7 +634,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 8 Data size: 2412 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (_col4 IS DISTINCT FROM _col7) (type: boolean) + predicate: ((_col4 <> _col7) or ((_col4 = _col7) or _col4 is null) is null) (type: boolean) Statistics: Num rows: 8 Data size: 2412 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) @@ -627,7 +977,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 12 Data size: 3616 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (_col4 IS DISTINCT FROM _col7) (type: boolean) + predicate: ((_col4 <> _col7) or _col4 is null or _col7 is null) (type: boolean) Statistics: Num rows: 12 Data size: 3616 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) diff --git a/iceberg/iceberg-handler/src/test/results/positive/merge_iceberg_partitioned_orc.q.out b/iceberg/iceberg-handler/src/test/results/positive/merge_iceberg_partitioned_orc.q.out index f8cc1fa15596..f20856fd291c 100644 --- a/iceberg/iceberg-handler/src/test/results/positive/merge_iceberg_partitioned_orc.q.out +++ 
b/iceberg/iceberg-handler/src/test/results/positive/merge_iceberg_partitioned_orc.q.out @@ -162,10 +162,10 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col0, 16) (type: int), _col1 (type: string) + key expressions: iceberg_bucket(_col0, 16) (type: int), iceberg_truncate(_col1, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col0, 16) (type: int), _col1 (type: string) + Map-reduce partition columns: iceberg_bucket(_col0, 16) (type: int), iceberg_truncate(_col1, 3) (type: string) Statistics: Num rows: 1 Data size: 98 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int) Filter Operator @@ -176,10 +176,10 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 6 Data size: 576 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col0, 16) (type: int), _col1 (type: string) + key expressions: iceberg_bucket(_col0, 16) (type: int), iceberg_truncate(_col1, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col0, 16) (type: int), _col1 (type: string) + Map-reduce partition columns: iceberg_bucket(_col0, 16) (type: int), iceberg_truncate(_col1, 3) (type: string) Statistics: Num rows: 6 Data size: 576 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int) Filter Operator @@ -237,8 +237,8 @@ STAGE PLANS: Execution mode: vectorized Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), VALUE._col2 (type: int), KEY.iceberg_bucket(_col0, 16) (type: int), KEY._col1 (type: string) - outputColumnNames: _col0, _col1, _col2, iceberg_bucket(_col0, 16), _col1 + expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), VALUE._col2 (type: int), KEY.iceberg_bucket(_col0, 16) (type: int), KEY.iceberg_truncate(_col1, 3) (type: string) + outputColumnNames: _col0, _col1, _col2, iceberg_bucket(_col0, 16), iceberg_truncate(_col1, 3) File Output Operator compressed: false Dp Sort State: PARTITION_SORTED @@ -252,8 +252,8 @@ STAGE PLANS: Execution mode: vectorized Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), VALUE._col2 (type: int), KEY.iceberg_bucket(_col0, 16) (type: int), KEY._col1 (type: string) - outputColumnNames: _col0, _col1, _col2, iceberg_bucket(_col0, 16), _col1 + expressions: VALUE._col0 (type: int), VALUE._col1 (type: string), VALUE._col2 (type: int), KEY.iceberg_bucket(_col0, 16) (type: int), KEY.iceberg_truncate(_col1, 3) (type: string) + outputColumnNames: _col0, _col1, _col2, iceberg_bucket(_col0, 16), iceberg_truncate(_col1, 3) File Output Operator compressed: false Dp Sort State: PARTITION_SORTED diff --git a/iceberg/iceberg-handler/src/test/results/positive/update_iceberg_copy_on_write_partitioned.q.out b/iceberg/iceberg-handler/src/test/results/positive/update_iceberg_copy_on_write_partitioned.q.out index 985c32f342ea..579504cb11ed 100644 --- a/iceberg/iceberg-handler/src/test/results/positive/update_iceberg_copy_on_write_partitioned.q.out +++ b/iceberg/iceberg-handler/src/test/results/positive/update_iceberg_copy_on_write_partitioned.q.out @@ -60,10 +60,10 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, 
_col5, _col6 Statistics: Num rows: 4 Data size: 1212 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 8 Data size: 2412 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Execution mode: vectorized @@ -71,10 +71,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tbl_ice - filterExpr: ((a <> 22) and (b <> 'one') and (b <> 'four')) (type: boolean) + filterExpr: (((b <> 'one') and (b <> 'four') and (a <> 22)) or ((a = 22) or (b) IN ('one', 'four')) is null) (type: boolean) Statistics: Num rows: 7 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((a <> 22) and (b <> 'one') and (b <> 'four') and FILE__PATH is not null) (type: boolean) + predicate: ((((b <> 'one') and (b <> 'four') and (a <> 22)) or ((a = 22) or (b) IN ('one', 'four')) is null) and FILE__PATH is not null) (type: boolean) Statistics: Num rows: 7 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int), b (type: string), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: bigint) @@ -118,8 +118,8 @@ STAGE PLANS: Execution mode: vectorized Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY._col5 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), _col5 + expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY.iceberg_truncate(_col5, 3) (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), iceberg_truncate(_col5, 3) File Output Operator compressed: false Dp Sort State: PARTITION_SORTED @@ -144,10 +144,10 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 8 Data size: 2412 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), 
_col6 (type: int) Reducer 7 @@ -184,10 +184,10 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 8 Data size: 2412 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Reducer 8 @@ -287,14 +287,14 @@ POSTHOOK: query: insert into tbl_ice values (444, 'hola', 800), (555, 'schola', POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table POSTHOOK: Output: default@tbl_ice -Warning: Shuffle Join MERGEJOIN[281][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product +Warning: Shuffle Join MERGEJOIN[282][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product Warning: Shuffle Join MERGEJOIN[295][tables = [$hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 4' is a cross product -Warning: Shuffle Join MERGEJOIN[286][tables = [$hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 10' is a cross product -Warning: Shuffle Join MERGEJOIN[283][tables = [$hdt$_2, $hdt$_3]] in Stage 'Reducer 14' is a cross product -Warning: Shuffle Join MERGEJOIN[288][tables = [$hdt$_2, $hdt$_3, $hdt$_4, $hdt$_5]] in Stage 'Reducer 16' is a cross product -Warning: Shuffle Join MERGEJOIN[303][tables = [$hdt$_1, $hdt$_2, $hdt$_0, $hdt$_3]] in Stage 'Reducer 13' is a cross product -Warning: Shuffle Join MERGEJOIN[280][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 19' is a cross product -Warning: Shuffle Join MERGEJOIN[293][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 21' is a cross product +Warning: Shuffle Join MERGEJOIN[287][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 10' is a cross product +Warning: Shuffle Join MERGEJOIN[289][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 12' is a cross product +Warning: Shuffle Join MERGEJOIN[284][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 14' is a cross product +Warning: Shuffle Join MERGEJOIN[293][tables = [$hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 16' is a cross product +Warning: Shuffle Join MERGEJOIN[281][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 19' is a cross product +Warning: Shuffle Join MERGEJOIN[291][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 21' is a cross product PREHOOK: query: explain update tbl_ice set b='Changed again' where a in (select a from tbl_ice where a <= 5) or c in (select c from tbl_ice where c > 800) PREHOOK: type: QUERY PREHOOK: Input: default@tbl_ice @@ -314,36 +314,34 @@ STAGE PLANS: Tez #### A masked pattern was here #### Edges: - Reducer 10 <- Reducer 30 (XPROD_EDGE), Reducer 9 (XPROD_EDGE) - Reducer 11 <- Reducer 10 (SIMPLE_EDGE), Reducer 18 (SIMPLE_EDGE) - Reducer 12 <- Reducer 11 (SIMPLE_EDGE), Reducer 23 (SIMPLE_EDGE) - Reducer 13 <- Reducer 12 (XPROD_EDGE), Reducer 25 (XPROD_EDGE), Union 7 (CONTAINS) - Reducer 14 <- Map 1 (XPROD_EDGE), Reducer 32 (XPROD_EDGE) - Reducer 15 <- Reducer 14 (SIMPLE_EDGE), Reducer 29 
(SIMPLE_EDGE) - Reducer 16 <- Reducer 15 (XPROD_EDGE), Reducer 24 (XPROD_EDGE) - Reducer 17 <- Reducer 16 (SIMPLE_EDGE), Reducer 23 (SIMPLE_EDGE) + Reducer 10 <- Reducer 29 (XPROD_EDGE), Reducer 9 (XPROD_EDGE) + Reducer 11 <- Reducer 10 (SIMPLE_EDGE), Reducer 24 (SIMPLE_EDGE) + Reducer 12 <- Reducer 11 (XPROD_EDGE), Reducer 26 (XPROD_EDGE) + Reducer 13 <- Reducer 12 (SIMPLE_EDGE), Reducer 18 (SIMPLE_EDGE), Union 7 (CONTAINS) + Reducer 14 <- Map 1 (XPROD_EDGE), Reducer 28 (XPROD_EDGE) + Reducer 15 <- Reducer 14 (SIMPLE_EDGE), Reducer 27 (SIMPLE_EDGE) + Reducer 16 <- Reducer 15 (XPROD_EDGE), Reducer 23 (XPROD_EDGE) + Reducer 17 <- Reducer 16 (SIMPLE_EDGE), Reducer 24 (SIMPLE_EDGE) Reducer 18 <- Reducer 17 (SIMPLE_EDGE) - Reducer 19 <- Map 1 (XPROD_EDGE), Reducer 31 (XPROD_EDGE) + Reducer 19 <- Map 1 (XPROD_EDGE), Reducer 30 (XPROD_EDGE) Reducer 2 <- Map 1 (XPROD_EDGE), Reducer 28 (XPROD_EDGE) - Reducer 20 <- Reducer 19 (SIMPLE_EDGE), Reducer 29 (SIMPLE_EDGE) - Reducer 21 <- Reducer 20 (XPROD_EDGE), Reducer 26 (XPROD_EDGE) - Reducer 22 <- Reducer 21 (SIMPLE_EDGE), Reducer 23 (SIMPLE_EDGE), Union 7 (CONTAINS) - Reducer 23 <- Map 1 (SIMPLE_EDGE) - Reducer 24 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 20 <- Reducer 19 (SIMPLE_EDGE), Reducer 27 (SIMPLE_EDGE) + Reducer 21 <- Reducer 20 (XPROD_EDGE), Reducer 23 (XPROD_EDGE) + Reducer 22 <- Reducer 21 (SIMPLE_EDGE), Reducer 24 (SIMPLE_EDGE), Union 7 (CONTAINS) + Reducer 23 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 24 <- Map 1 (SIMPLE_EDGE) Reducer 25 <- Map 1 (CUSTOM_SIMPLE_EDGE) Reducer 26 <- Map 1 (CUSTOM_SIMPLE_EDGE) - Reducer 27 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 27 <- Map 1 (SIMPLE_EDGE) Reducer 28 <- Map 1 (CUSTOM_SIMPLE_EDGE) - Reducer 29 <- Map 1 (SIMPLE_EDGE) - Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 29 (SIMPLE_EDGE) + Reducer 29 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 27 (SIMPLE_EDGE) Reducer 30 <- Map 1 (CUSTOM_SIMPLE_EDGE) - Reducer 31 <- Map 1 (CUSTOM_SIMPLE_EDGE) - Reducer 32 <- Map 1 (CUSTOM_SIMPLE_EDGE) - Reducer 4 <- Reducer 27 (XPROD_EDGE), Reducer 3 (XPROD_EDGE) - Reducer 5 <- Reducer 23 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE) + Reducer 4 <- Reducer 25 (XPROD_EDGE), Reducer 3 (XPROD_EDGE) + Reducer 5 <- Reducer 24 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE) Reducer 6 <- Reducer 5 (SIMPLE_EDGE), Union 7 (CONTAINS) Reducer 8 <- Union 7 (SIMPLE_EDGE) - Reducer 9 <- Map 1 (SIMPLE_EDGE), Reducer 29 (SIMPLE_EDGE) + Reducer 9 <- Map 1 (SIMPLE_EDGE), Reducer 27 (SIMPLE_EDGE) #### A masked pattern was here #### Vertices: Map 1 @@ -395,6 +393,19 @@ STAGE PLANS: Filter Operator predicate: (c > 800) (type: boolean) Statistics: Num rows: 9 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + Statistics: Num rows: 9 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count() + minReductionHashAggr: 0.8888889 + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) Group By Operator keys: c (type: int) minReductionHashAggr: 0.7777778 @@ -431,48 +442,9 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint), _col1 (type: bigint) - Select Operator - Statistics: Num rows: 9 Data size: 36 Basic stats: 
COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - minReductionHashAggr: 0.8888889 - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Select Operator - Statistics: Num rows: 9 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - minReductionHashAggr: 0.8888889 - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) Filter Operator predicate: (a <= 5) (type: boolean) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - minReductionHashAggr: 0.4 - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) Group By Operator keys: a (type: int) minReductionHashAggr: 0.4 @@ -485,17 +457,6 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count(), count(a) - minReductionHashAggr: 0.4 - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint), _col1 (type: bigint) Select Operator Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator @@ -509,6 +470,17 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) + Group By Operator + aggregations: count(), count(a) + minReductionHashAggr: 0.4 + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint), _col1 (type: bigint) Select Operator Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator @@ -537,38 +509,14 @@ STAGE PLANS: expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col9 (type: bigint), _col10 (type: bigint), _col8 (type: boolean) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10 Statistics: Num rows: 9 Data size: 2877 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: ((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) 
(type: boolean) + Reduce Output Operator + key expressions: _col2 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col2 (type: int) Statistics: Num rows: 9 Data size: 2877 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 9 Data size: 2709 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col5 (type: string) - null sort order: z - sort order: + - Map-reduce partition columns: _col5 (type: string) - Statistics: Num rows: 9 Data size: 2709 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col6 (type: bigint) + value expressions: _col0 (type: int), _col1 (type: string), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col10 (type: boolean) Reducer 11 - Reduce Operator Tree: - Merge Join Operator - condition map: - Left Semi Join 0 to 1 - keys: - 0 _col5 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col2 (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: _col2 (type: int) - Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: string), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) - Reducer 12 Reduce Operator Tree: Merge Join Operator condition map: @@ -576,14 +524,14 @@ STAGE PLANS: keys: 0 _col2 (type: int) 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col12 - Statistics: Num rows: 6 Data size: 1830 Basic stats: COMPLETE Column stats: COMPLETE + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col12 + Statistics: Num rows: 9 Data size: 2913 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 6 Data size: 1830 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col12 (type: boolean) - Reducer 13 + Statistics: Num rows: 9 Data size: 2913 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col10 (type: boolean), _col12 (type: boolean) + Reducer 12 Reduce Operator Tree: Merge Join Operator condition map: @@ -591,26 +539,47 @@ STAGE PLANS: keys: 0 1 - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col12, _col13, _col14 - Statistics: Num rows: 6 Data size: 1926 Basic stats: COMPLETE Column stats: COMPLETE + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col12, _col13, _col14 + Statistics: Num rows: 9 Data size: 3057 Basic stats: COMPLETE Column stats: 
COMPLETE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col13 (type: bigint), _col14 (type: bigint), _col12 (type: boolean) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col11, _col12, _col14 - Statistics: Num rows: 6 Data size: 1926 Basic stats: COMPLETE Column stats: COMPLETE + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col10 (type: boolean), _col13 (type: bigint), _col14 (type: bigint), _col12 (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col12, _col14 + Statistics: Num rows: 9 Data size: 3057 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col11 = 0L) or (_col14 is null and (_col12 >= _col11) and _col2 is not null)) (type: boolean) - Statistics: Num rows: 6 Data size: 1926 Basic stats: COMPLETE Column stats: COMPLETE + predicate: (((_col10 is not null and (_col7 <> 0L)) or ((_col0 is null or (_col8 < _col7)) and null and (_col7 <> 0L) and _col10 is null) or (_col14 is not null and (_col11 <> 0L)) or ((_col2 is null or (_col12 < _col11)) and null and (_col11 <> 0L) and _col14 is null)) is null or (((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) and ((_col11 = 0L) or (_col14 is null and (_col12 >= _col11) and _col2 is not null)))) (type: boolean) + Statistics: Num rows: 9 Data size: 3057 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col0 (type: int), _col1 (type: string), _col2 (type: int) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 9 Data size: 2709 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) - null sort order: aa - sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) - Statistics: Num rows: 19 Data size: 5791 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) + key expressions: _col5 (type: string) + null sort order: z + sort order: + + Map-reduce partition columns: _col5 (type: string) + Statistics: Num rows: 9 Data size: 2709 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col6 (type: bigint) + Reducer 13 + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Semi Join 0 to 1 + keys: + 0 _col5 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), 
_col0 (type: int), _col1 (type: string), _col2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) + null sort order: aa + sort order: ++ + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) + Statistics: Num rows: 19 Data size: 5791 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Reducer 14 Reduce Operator Tree: Merge Join Operator @@ -809,13 +778,31 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 9 Data size: 2781 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 19 Data size: 5791 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Reducer 23 + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reducer 24 Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -855,7 +842,7 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: boolean) - Reducer 24 + Reducer 25 Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -868,7 +855,7 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) - Reducer 25 + Reducer 26 Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -881,46 +868,7 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint), _col1 (type: bigint) - Reducer 26 - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: 
bigint) Reducer 27 - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reducer 28 - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reducer 29 Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -960,22 +908,25 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: boolean) - Reducer 3 + Reducer 28 + Execution mode: vectorized Reduce Operator Tree: - Merge Join Operator - condition map: - Left Outer Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col8 - Statistics: Num rows: 9 Data size: 2733 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 9 Data size: 2733 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col8 (type: boolean) - Reducer 30 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reducer 29 Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -988,20 +939,22 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint), _col1 (type: bigint) - Reducer 31 - Execution mode: vectorized + Reducer 3 Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Merge Join Operator + condition map: + Left Outer Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col8 + Statistics: Num rows: 9 Data size: 2733 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reducer 32 + Statistics: Num rows: 9 Data size: 2733 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), 
_col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col8 (type: boolean) + Reducer 30 Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -1085,18 +1038,18 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 4 Data size: 1204 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 19 Data size: 5791 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Reducer 8 Execution mode: vectorized Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY._col5 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), _col5 + expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY.iceberg_truncate(_col5, 3) (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), iceberg_truncate(_col5, 3) File Output Operator compressed: false Dp Sort State: PARTITION_SORTED @@ -1141,14 +1094,14 @@ STAGE PLANS: Stats Work Basic Stats Work: -Warning: Shuffle Join MERGEJOIN[281][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product +Warning: Shuffle Join MERGEJOIN[282][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product Warning: Shuffle Join MERGEJOIN[295][tables = [$hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 4' is a cross product -Warning: Shuffle Join MERGEJOIN[286][tables = [$hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 10' is a cross product -Warning: Shuffle Join MERGEJOIN[283][tables = [$hdt$_2, $hdt$_3]] in Stage 'Reducer 14' is a cross product -Warning: Shuffle Join MERGEJOIN[288][tables = [$hdt$_2, $hdt$_3, $hdt$_4, $hdt$_5]] in Stage 'Reducer 16' is a cross product -Warning: Shuffle Join MERGEJOIN[303][tables = [$hdt$_1, $hdt$_2, $hdt$_0, $hdt$_3]] in Stage 'Reducer 13' is a cross product -Warning: Shuffle Join MERGEJOIN[280][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 19' is a cross product -Warning: Shuffle Join MERGEJOIN[293][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 21' is a cross product +Warning: Shuffle Join MERGEJOIN[287][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 10' is a cross product +Warning: Shuffle Join MERGEJOIN[289][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 12' is a cross product +Warning: Shuffle Join MERGEJOIN[284][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 14' is a cross product +Warning: Shuffle Join MERGEJOIN[293][tables = [$hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 16' is a 
cross product +Warning: Shuffle Join MERGEJOIN[281][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 19' is a cross product +Warning: Shuffle Join MERGEJOIN[291][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 21' is a cross product PREHOOK: query: update tbl_ice set b='Changed again' where a in (select a from tbl_ice where a <= 5) or c in (select c from tbl_ice where c > 800) PREHOOK: type: QUERY PREHOOK: Input: default@tbl_ice @@ -1227,9 +1180,9 @@ STAGE PLANS: Reducer 3 <- Reducer 14 (XPROD_EDGE), Reducer 2 (XPROD_EDGE) Reducer 4 <- Reducer 13 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE), Union 5 (CONTAINS) Reducer 6 <- Union 5 (SIMPLE_EDGE) - Reducer 7 <- Map 1 (SIMPLE_EDGE), Reducer 10 (SIMPLE_EDGE) - Reducer 8 <- Reducer 7 (SIMPLE_EDGE), Union 5 (CONTAINS) - Reducer 9 <- Map 1 (SIMPLE_EDGE), Reducer 10 (SIMPLE_EDGE), Union 5 (CONTAINS) + Reducer 7 <- Map 1 (SIMPLE_EDGE), Reducer 10 (SIMPLE_EDGE), Union 5 (CONTAINS) + Reducer 8 <- Map 1 (SIMPLE_EDGE), Reducer 10 (SIMPLE_EDGE) + Reducer 9 <- Reducer 8 (SIMPLE_EDGE), Union 5 (CONTAINS) #### A masked pattern was here #### Vertices: Map 1 @@ -1253,29 +1206,29 @@ STAGE PLANS: value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) Filter Operator predicate: a is not null (type: boolean) - Statistics: Num rows: 9 Data size: 873 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 9 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: a (type: int), b (type: string), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string) + expressions: a (type: int), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 9 Data size: 2637 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 9 Data size: 1908 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 9 Data size: 2637 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string) + Statistics: Num rows: 9 Data size: 1908 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: bigint), _col4 (type: string), _col5 (type: bigint) Select Operator - expressions: a (type: int), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: bigint) + expressions: a (type: int), b (type: string), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 9 Data size: 1908 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 9 Data size: 2637 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 9 Data size: 1908 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: bigint), _col4 (type: string), 
_col5 (type: bigint) + Statistics: Num rows: 9 Data size: 2637 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string) Select Operator expressions: a (type: int) outputColumnNames: _col0 @@ -1496,7 +1449,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10 Statistics: Num rows: 10 Data size: 3178 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) (type: boolean) + predicate: ((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null) or ((_col10 is not null and (_col7 <> 0L)) or ((_col0 is null or (_col8 < _col7)) and null and (_col7 <> 0L) and _col10 is null)) is null) (type: boolean) Statistics: Num rows: 10 Data size: 3178 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) @@ -1524,18 +1477,18 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 3 Data size: 913 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Reducer 6 Execution mode: vectorized Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY._col5 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), _col5 + expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY.iceberg_truncate(_col5, 3) (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), iceberg_truncate(_col5, 3) File Output Operator compressed: false Dp Sort State: PARTITION_SORTED @@ -1546,6 +1499,27 @@ STAGE PLANS: serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe name: default.tbl_ice Reducer 7 + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Semi Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col2 (type: int), _col3 (type: bigint), _col4 (type: string), _col5 (type: bigint), _col0 (type: int), 'Changed forever' (type: string), _col1 (type: int) + 
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 1 Data size: 311 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) + null sort order: aa + sort order: ++ + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) + Statistics: Num rows: 3 Data size: 913 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) + Reducer 8 Reduce Operator Tree: Merge Join Operator condition map: @@ -1562,7 +1536,7 @@ STAGE PLANS: Map-reduce partition columns: _col5 (type: string) Statistics: Num rows: 1 Data size: 293 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint) - Reducer 8 + Reducer 9 Execution mode: vectorized Reduce Operator Tree: Select Operator @@ -1596,33 +1570,12 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 3 Data size: 913 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) - Reducer 9 - Reduce Operator Tree: - Merge Join Operator - condition map: - Left Semi Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col2 (type: int), _col3 (type: bigint), _col4 (type: string), _col5 (type: bigint), _col0 (type: int), 'Changed forever' (type: string), _col1 (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 1 Data size: 311 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) - null sort order: aa - sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) - Statistics: Num rows: 3 Data size: 913 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Union 5 Vertex: Union 5 @@ -1772,29 +1725,29 @@ STAGE PLANS: value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) Filter Operator predicate: a is not null (type: boolean) - Statistics: Num rows: 9 Data size: 873 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 9 Data size: 72 Basic stats: COMPLETE Column 
stats: COMPLETE Select Operator - expressions: a (type: int), b (type: string), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string) + expressions: a (type: int), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 9 Data size: 2637 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 9 Data size: 1908 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 9 Data size: 2637 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string) + Statistics: Num rows: 9 Data size: 1908 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: bigint), _col4 (type: string), _col5 (type: bigint) Select Operator - expressions: a (type: int), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: bigint) + expressions: a (type: int), b (type: string), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 9 Data size: 1908 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 9 Data size: 2637 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 9 Data size: 1908 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: bigint), _col4 (type: string), _col5 (type: bigint) + Statistics: Num rows: 9 Data size: 2637 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string) Select Operator expressions: a (type: int) outputColumnNames: _col0 @@ -1946,10 +1899,10 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 3 Data size: 910 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Reducer 2 @@ -2030,18 +1983,18 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 308 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: 
iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 3 Data size: 910 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Reducer 5 Execution mode: vectorized Reduce Operator Tree: Select Operator - expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY._col5 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), _col5 + expressions: VALUE._col0 (type: int), VALUE._col1 (type: bigint), VALUE._col2 (type: string), VALUE._col3 (type: bigint), VALUE._col4 (type: int), VALUE._col5 (type: string), VALUE._col6 (type: int), KEY.iceberg_bucket(_col4, 16) (type: int), KEY.iceberg_truncate(_col5, 3) (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, iceberg_bucket(_col4, 16), iceberg_truncate(_col5, 3) File Output Operator compressed: false Dp Sort State: PARTITION_SORTED @@ -2100,7 +2053,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10 Statistics: Num rows: 10 Data size: 3178 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) (type: boolean) + predicate: ((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null) or ((_col10 is not null and (_col7 <> 0L)) or ((_col0 is null or (_col8 < _col7)) and null and (_col7 <> 0L) and _col10 is null)) is null) (type: boolean) Statistics: Num rows: 10 Data size: 3178 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) @@ -2128,10 +2081,10 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 1 Data size: 301 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator - key expressions: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + key expressions: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) null sort order: aa sort order: ++ - Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), _col5 (type: string) + Map-reduce partition columns: iceberg_bucket(_col4, 16) (type: int), iceberg_truncate(_col5, 3) (type: string) Statistics: Num rows: 3 Data size: 910 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: bigint), _col2 (type: string), _col3 (type: bigint), _col4 (type: int), _col5 (type: string), _col6 (type: int) Union 4 diff --git a/iceberg/iceberg-handler/src/test/results/positive/update_iceberg_copy_on_write_unpartitioned.q.out b/iceberg/iceberg-handler/src/test/results/positive/update_iceberg_copy_on_write_unpartitioned.q.out index 98c24ce144c9..2242fdae23a5 100644 --- 
a/iceberg/iceberg-handler/src/test/results/positive/update_iceberg_copy_on_write_unpartitioned.q.out +++ b/iceberg/iceberg-handler/src/test/results/positive/update_iceberg_copy_on_write_unpartitioned.q.out @@ -71,10 +71,10 @@ STAGE PLANS: Map Operator Tree: TableScan alias: tbl_ice - filterExpr: ((a <> 22) and (b <> 'one') and (b <> 'four')) (type: boolean) + filterExpr: (((b <> 'one') and (b <> 'four') and (a <> 22)) or ((a = 22) or (b) IN ('one', 'four')) is null) (type: boolean) Statistics: Num rows: 7 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((a <> 22) and (b <> 'one') and (b <> 'four') and FILE__PATH is not null) (type: boolean) + predicate: ((((b <> 'one') and (b <> 'four') and (a <> 22)) or ((a = 22) or (b) IN ('one', 'four')) is null) and FILE__PATH is not null) (type: boolean) Statistics: Num rows: 7 Data size: 672 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int), b (type: string), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: bigint) @@ -274,14 +274,14 @@ POSTHOOK: query: insert into tbl_ice values (444, 'hola', 800), (555, 'schola', POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table POSTHOOK: Output: default@tbl_ice -Warning: Shuffle Join MERGEJOIN[279][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product +Warning: Shuffle Join MERGEJOIN[280][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product Warning: Shuffle Join MERGEJOIN[293][tables = [$hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 4' is a cross product -Warning: Shuffle Join MERGEJOIN[284][tables = [$hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 9' is a cross product -Warning: Shuffle Join MERGEJOIN[281][tables = [$hdt$_2, $hdt$_3]] in Stage 'Reducer 13' is a cross product -Warning: Shuffle Join MERGEJOIN[286][tables = [$hdt$_2, $hdt$_3, $hdt$_4, $hdt$_5]] in Stage 'Reducer 15' is a cross product -Warning: Shuffle Join MERGEJOIN[301][tables = [$hdt$_1, $hdt$_2, $hdt$_0, $hdt$_3]] in Stage 'Reducer 12' is a cross product -Warning: Shuffle Join MERGEJOIN[278][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 18' is a cross product -Warning: Shuffle Join MERGEJOIN[291][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 20' is a cross product +Warning: Shuffle Join MERGEJOIN[285][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 9' is a cross product +Warning: Shuffle Join MERGEJOIN[287][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 11' is a cross product +Warning: Shuffle Join MERGEJOIN[282][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 13' is a cross product +Warning: Shuffle Join MERGEJOIN[291][tables = [$hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 15' is a cross product +Warning: Shuffle Join MERGEJOIN[279][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 18' is a cross product +Warning: Shuffle Join MERGEJOIN[289][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 20' is a cross product PREHOOK: query: explain update tbl_ice set b='Changed again' where a in (select a from tbl_ice where a <= 5) or c in (select c from tbl_ice where c > 800) PREHOOK: type: QUERY PREHOOK: Input: default@tbl_ice @@ -301,35 +301,33 @@ STAGE PLANS: Tez #### A masked pattern was here #### Edges: - Reducer 10 <- Reducer 17 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE) - Reducer 11 <- Reducer 10 (SIMPLE_EDGE), Reducer 22 (SIMPLE_EDGE) - Reducer 12 <- Reducer 11 (XPROD_EDGE), Reducer 24 
(XPROD_EDGE), Union 7 (CONTAINS) - Reducer 13 <- Map 1 (XPROD_EDGE), Reducer 31 (XPROD_EDGE) - Reducer 14 <- Reducer 13 (SIMPLE_EDGE), Reducer 28 (SIMPLE_EDGE) - Reducer 15 <- Reducer 14 (XPROD_EDGE), Reducer 23 (XPROD_EDGE) - Reducer 16 <- Reducer 15 (SIMPLE_EDGE), Reducer 22 (SIMPLE_EDGE) + Reducer 10 <- Reducer 23 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE) + Reducer 11 <- Reducer 10 (XPROD_EDGE), Reducer 25 (XPROD_EDGE) + Reducer 12 <- Reducer 11 (SIMPLE_EDGE), Reducer 17 (SIMPLE_EDGE), Union 7 (CONTAINS) + Reducer 13 <- Map 1 (XPROD_EDGE), Reducer 27 (XPROD_EDGE) + Reducer 14 <- Reducer 13 (SIMPLE_EDGE), Reducer 26 (SIMPLE_EDGE) + Reducer 15 <- Reducer 14 (XPROD_EDGE), Reducer 22 (XPROD_EDGE) + Reducer 16 <- Reducer 15 (SIMPLE_EDGE), Reducer 23 (SIMPLE_EDGE) Reducer 17 <- Reducer 16 (SIMPLE_EDGE) - Reducer 18 <- Map 1 (XPROD_EDGE), Reducer 30 (XPROD_EDGE) - Reducer 19 <- Reducer 18 (SIMPLE_EDGE), Reducer 28 (SIMPLE_EDGE) + Reducer 18 <- Map 1 (XPROD_EDGE), Reducer 29 (XPROD_EDGE) + Reducer 19 <- Reducer 18 (SIMPLE_EDGE), Reducer 26 (SIMPLE_EDGE) Reducer 2 <- Map 1 (XPROD_EDGE), Reducer 27 (XPROD_EDGE) - Reducer 20 <- Reducer 19 (XPROD_EDGE), Reducer 25 (XPROD_EDGE) - Reducer 21 <- Reducer 20 (SIMPLE_EDGE), Reducer 22 (SIMPLE_EDGE), Union 7 (CONTAINS) - Reducer 22 <- Map 1 (SIMPLE_EDGE) - Reducer 23 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 20 <- Reducer 19 (XPROD_EDGE), Reducer 22 (XPROD_EDGE) + Reducer 21 <- Reducer 20 (SIMPLE_EDGE), Reducer 23 (SIMPLE_EDGE), Union 7 (CONTAINS) + Reducer 22 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 23 <- Map 1 (SIMPLE_EDGE) Reducer 24 <- Map 1 (CUSTOM_SIMPLE_EDGE) Reducer 25 <- Map 1 (CUSTOM_SIMPLE_EDGE) - Reducer 26 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 26 <- Map 1 (SIMPLE_EDGE) Reducer 27 <- Map 1 (CUSTOM_SIMPLE_EDGE) - Reducer 28 <- Map 1 (SIMPLE_EDGE) + Reducer 28 <- Map 1 (CUSTOM_SIMPLE_EDGE) Reducer 29 <- Map 1 (CUSTOM_SIMPLE_EDGE) - Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 28 (SIMPLE_EDGE) - Reducer 30 <- Map 1 (CUSTOM_SIMPLE_EDGE) - Reducer 31 <- Map 1 (CUSTOM_SIMPLE_EDGE) - Reducer 4 <- Reducer 26 (XPROD_EDGE), Reducer 3 (XPROD_EDGE) - Reducer 5 <- Reducer 22 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE), Reducer 26 (SIMPLE_EDGE) + Reducer 4 <- Reducer 24 (XPROD_EDGE), Reducer 3 (XPROD_EDGE) + Reducer 5 <- Reducer 23 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE) Reducer 6 <- Reducer 5 (SIMPLE_EDGE), Union 7 (CONTAINS) - Reducer 8 <- Map 1 (SIMPLE_EDGE), Reducer 28 (SIMPLE_EDGE) - Reducer 9 <- Reducer 29 (XPROD_EDGE), Reducer 8 (XPROD_EDGE) + Reducer 8 <- Map 1 (SIMPLE_EDGE), Reducer 26 (SIMPLE_EDGE) + Reducer 9 <- Reducer 28 (XPROD_EDGE), Reducer 8 (XPROD_EDGE) #### A masked pattern was here #### Vertices: Map 1 @@ -381,6 +379,19 @@ STAGE PLANS: Filter Operator predicate: (c > 800) (type: boolean) Statistics: Num rows: 9 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + Statistics: Num rows: 9 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count() + minReductionHashAggr: 0.8888889 + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) Group By Operator keys: c (type: int) minReductionHashAggr: 0.7777778 @@ -417,48 +428,9 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 16 Basic 
stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint), _col1 (type: bigint) - Select Operator - Statistics: Num rows: 9 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - minReductionHashAggr: 0.8888889 - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Select Operator - Statistics: Num rows: 9 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - minReductionHashAggr: 0.8888889 - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) Filter Operator predicate: (a <= 5) (type: boolean) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - minReductionHashAggr: 0.4 - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) Group By Operator keys: a (type: int) minReductionHashAggr: 0.4 @@ -471,17 +443,6 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count(), count(a) - minReductionHashAggr: 0.4 - mode: hash - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint), _col1 (type: bigint) Select Operator Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator @@ -495,6 +456,17 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) + Group By Operator + aggregations: count(), count(a) + minReductionHashAggr: 0.4 + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint), _col1 (type: bigint) Select Operator Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator @@ -510,23 +482,6 @@ STAGE PLANS: value expressions: _col0 (type: bigint) Execution mode: vectorized Reducer 10 - Reduce Operator Tree: - Merge Join Operator - condition map: - Left Semi Join 0 to 1 - keys: - 0 _col5 (type: string) - 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 6 Data size: 1806 Basic 
stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col2 (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: _col2 (type: int) - Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: string), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) - Reducer 11 Reduce Operator Tree: Merge Join Operator condition map: @@ -534,14 +489,14 @@ STAGE PLANS: keys: 0 _col2 (type: int) 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col12 - Statistics: Num rows: 6 Data size: 1830 Basic stats: COMPLETE Column stats: COMPLETE + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col12 + Statistics: Num rows: 9 Data size: 2913 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 6 Data size: 1830 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col12 (type: boolean) - Reducer 12 + Statistics: Num rows: 9 Data size: 2913 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col10 (type: boolean), _col12 (type: boolean) + Reducer 11 Reduce Operator Tree: Merge Join Operator condition map: @@ -549,27 +504,48 @@ STAGE PLANS: keys: 0 1 - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col12, _col13, _col14 - Statistics: Num rows: 6 Data size: 1926 Basic stats: COMPLETE Column stats: COMPLETE + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col12, _col13, _col14 + Statistics: Num rows: 9 Data size: 3057 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col13 (type: bigint), _col14 (type: bigint), _col12 (type: boolean) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col11, _col12, _col14 - Statistics: Num rows: 6 Data size: 1926 Basic stats: COMPLETE Column stats: COMPLETE + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col7 (type: bigint), _col8 (type: bigint), _col10 (type: boolean), _col13 (type: bigint), _col14 (type: bigint), _col12 (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10, _col11, _col12, _col14 + Statistics: Num rows: 9 Data size: 3057 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col11 = 0L) or (_col14 is null and (_col12 >= _col11) and _col2 is not null)) (type: boolean) - Statistics: Num rows: 6 Data size: 1926 Basic stats: COMPLETE Column stats: COMPLETE + predicate: (((_col10 is not null and (_col7 <> 0L)) or ((_col0 is null or (_col8 < _col7)) and null and (_col7 <> 0L) and _col10 is null) or (_col14 is not null and (_col11 <> 0L)) or ((_col2 is null or (_col12 < _col11)) and null and (_col11 <> 0L) and _col14 is null)) is null or (((_col7 = 0L) or (_col10 is 
null and (_col8 >= _col7) and _col0 is not null)) and ((_col11 = 0L) or (_col14 is null and (_col12 >= _col11) and _col2 is not null)))) (type: boolean) + Statistics: Num rows: 9 Data size: 3057 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col0 (type: int), _col1 (type: string), _col2 (type: int) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 19 Data size: 5791 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.iceberg.mr.hive.HiveIcebergInputFormat - output format: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat - serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe - name: default.tbl_ice + Statistics: Num rows: 9 Data size: 2709 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col5 (type: string) + null sort order: z + sort order: + + Map-reduce partition columns: _col5 (type: string) + Statistics: Num rows: 9 Data size: 2709 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col6 (type: bigint) + Reducer 12 + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Semi Join 0 to 1 + keys: + 0 _col5 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col0 (type: int), _col1 (type: string), _col2 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: Num rows: 6 Data size: 1806 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 19 Data size: 5791 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.iceberg.mr.hive.HiveIcebergInputFormat + output format: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat + serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe + name: default.tbl_ice Reducer 13 Reduce Operator Tree: Merge Join Operator @@ -776,6 +752,24 @@ STAGE PLANS: serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe name: default.tbl_ice Reducer 22 + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reducer 23 Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -815,7 +809,7 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE 
Column stats: COMPLETE value expressions: _col1 (type: boolean) - Reducer 23 + Reducer 24 Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -828,7 +822,7 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) - Reducer 24 + Reducer 25 Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -841,46 +835,7 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint), _col1 (type: bigint) - Reducer 25 - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) Reducer 26 - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reducer 27 - Execution mode: vectorized - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reducer 28 Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -920,48 +875,38 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: boolean) - Reducer 29 + Reducer 27 Execution mode: vectorized Reduce Operator Tree: Group By Operator - aggregations: count(VALUE._col0), count(VALUE._col1) + aggregations: count(VALUE._col0) mode: mergepartial - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint), _col1 (type: bigint) - Reducer 3 - Reduce Operator Tree: - Merge Join Operator - condition map: - Left Outer Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col8 - Statistics: Num rows: 9 Data size: 2733 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 9 Data size: 2733 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: 
string), _col6 (type: bigint), _col8 (type: boolean) - Reducer 30 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reducer 28 Execution mode: vectorized Reduce Operator Tree: Group By Operator - aggregations: count(VALUE._col0) + aggregations: count(VALUE._col0), count(VALUE._col1) mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Reducer 31 + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint), _col1 (type: bigint) + Reducer 29 Execution mode: vectorized Reduce Operator Tree: Group By Operator @@ -974,6 +919,21 @@ STAGE PLANS: sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) + Reducer 3 + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Outer Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col8 + Statistics: Num rows: 9 Data size: 2733 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 9 Data size: 2733 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col8 (type: boolean) Reducer 4 Reduce Operator Tree: Merge Join Operator @@ -1081,20 +1041,13 @@ STAGE PLANS: expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint), _col9 (type: bigint), _col10 (type: bigint), _col8 (type: boolean) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10 Statistics: Num rows: 9 Data size: 2877 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: ((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) (type: boolean) + Reduce Output Operator + key expressions: _col2 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col2 (type: int) Statistics: Num rows: 9 Data size: 2877 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 9 Data size: 2709 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col5 (type: string) - null sort order: z - sort order: + - Map-reduce partition columns: _col5 (type: string) - Statistics: Num rows: 9 Data size: 2709 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col6 (type: bigint) + value expressions: _col0 (type: int), _col1 (type: string), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: 
bigint), _col7 (type: bigint), _col8 (type: bigint), _col10 (type: boolean) Union 7 Vertex: Union 7 @@ -1115,14 +1068,14 @@ STAGE PLANS: Stats Work Basic Stats Work: -Warning: Shuffle Join MERGEJOIN[279][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product +Warning: Shuffle Join MERGEJOIN[280][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 2' is a cross product Warning: Shuffle Join MERGEJOIN[293][tables = [$hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 4' is a cross product -Warning: Shuffle Join MERGEJOIN[284][tables = [$hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 9' is a cross product -Warning: Shuffle Join MERGEJOIN[281][tables = [$hdt$_2, $hdt$_3]] in Stage 'Reducer 13' is a cross product -Warning: Shuffle Join MERGEJOIN[286][tables = [$hdt$_2, $hdt$_3, $hdt$_4, $hdt$_5]] in Stage 'Reducer 15' is a cross product -Warning: Shuffle Join MERGEJOIN[301][tables = [$hdt$_1, $hdt$_2, $hdt$_0, $hdt$_3]] in Stage 'Reducer 12' is a cross product -Warning: Shuffle Join MERGEJOIN[278][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 18' is a cross product -Warning: Shuffle Join MERGEJOIN[291][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 20' is a cross product +Warning: Shuffle Join MERGEJOIN[285][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 9' is a cross product +Warning: Shuffle Join MERGEJOIN[287][tables = [$hdt$_0, $hdt$_1, $hdt$_2]] in Stage 'Reducer 11' is a cross product +Warning: Shuffle Join MERGEJOIN[282][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 13' is a cross product +Warning: Shuffle Join MERGEJOIN[291][tables = [$hdt$_1, $hdt$_2, $hdt$_3, $hdt$_4]] in Stage 'Reducer 15' is a cross product +Warning: Shuffle Join MERGEJOIN[279][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 18' is a cross product +Warning: Shuffle Join MERGEJOIN[289][tables = [$hdt$_0, $hdt$_1, $hdt$_2, $hdt$_3]] in Stage 'Reducer 20' is a cross product PREHOOK: query: update tbl_ice set b='Changed again' where a in (select a from tbl_ice where a <= 5) or c in (select c from tbl_ice where c > 800) PREHOOK: type: QUERY PREHOOK: Input: default@tbl_ice @@ -1199,9 +1152,9 @@ STAGE PLANS: Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 10 (SIMPLE_EDGE) Reducer 3 <- Reducer 13 (XPROD_EDGE), Reducer 2 (XPROD_EDGE) Reducer 4 <- Reducer 12 (SIMPLE_EDGE), Reducer 3 (SIMPLE_EDGE), Union 5 (CONTAINS) - Reducer 6 <- Map 1 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE) - Reducer 7 <- Reducer 6 (SIMPLE_EDGE), Union 5 (CONTAINS) - Reducer 8 <- Map 1 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE), Union 5 (CONTAINS) + Reducer 6 <- Map 1 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE), Union 5 (CONTAINS) + Reducer 7 <- Map 1 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE) + Reducer 8 <- Reducer 7 (SIMPLE_EDGE), Union 5 (CONTAINS) Reducer 9 <- Map 1 (SIMPLE_EDGE), Map 14 (SIMPLE_EDGE) #### A masked pattern was here #### Vertices: @@ -1226,29 +1179,29 @@ STAGE PLANS: value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) Filter Operator predicate: a is not null (type: boolean) - Statistics: Num rows: 9 Data size: 873 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 9 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: a (type: int), b (type: string), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string) + expressions: a (type: int), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH 
(type: string), ROW__POSITION (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 9 Data size: 2637 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 9 Data size: 1908 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 9 Data size: 2637 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string) + Statistics: Num rows: 9 Data size: 1908 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: bigint), _col4 (type: string), _col5 (type: bigint) Select Operator - expressions: a (type: int), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: bigint) + expressions: a (type: int), b (type: string), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 9 Data size: 1908 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 9 Data size: 2637 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 9 Data size: 1908 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: bigint), _col4 (type: string), _col5 (type: bigint) + Statistics: Num rows: 9 Data size: 2637 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string) Select Operator expressions: a (type: int) outputColumnNames: _col0 @@ -1418,7 +1371,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10 Statistics: Num rows: 10 Data size: 3178 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) (type: boolean) + predicate: ((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null) or ((_col10 is not null and (_col7 <> 0L)) or ((_col0 is null or (_col8 < _col7)) and null and (_col7 <> 0L) and _col10 is null)) is null) (type: boolean) Statistics: Num rows: 10 Data size: 3178 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) @@ -1454,6 +1407,28 @@ STAGE PLANS: serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe name: default.tbl_ice Reducer 6 + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Semi Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col2 (type: int), _col3 (type: bigint), _col4 (type: string), _col5 (type: bigint), _col0 (type: int), 'Changed forever' (type: string), _col1 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 + Statistics: 
Num rows: 1 Data size: 311 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 3 Data size: 913 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.iceberg.mr.hive.HiveIcebergInputFormat + output format: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat + serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe + name: default.tbl_ice + Reducer 7 Reduce Operator Tree: Merge Join Operator condition map: @@ -1470,7 +1445,7 @@ STAGE PLANS: Map-reduce partition columns: _col5 (type: string) Statistics: Num rows: 1 Data size: 293 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint) - Reducer 7 + Reducer 8 Execution mode: vectorized Reduce Operator Tree: Select Operator @@ -1511,28 +1486,6 @@ STAGE PLANS: output format: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe name: default.tbl_ice - Reducer 8 - Reduce Operator Tree: - Merge Join Operator - condition map: - Left Semi Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col2 (type: int), _col3 (type: bigint), _col4 (type: string), _col5 (type: bigint), _col0 (type: int), 'Changed forever' (type: string), _col1 (type: int) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 - Statistics: Num rows: 1 Data size: 311 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 3 Data size: 913 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.iceberg.mr.hive.HiveIcebergInputFormat - output format: org.apache.iceberg.mr.hive.HiveIcebergOutputFormat - serde: org.apache.iceberg.mr.hive.HiveIcebergSerDe - name: default.tbl_ice Reducer 9 Reduce Operator Tree: Merge Join Operator @@ -1732,29 +1685,29 @@ STAGE PLANS: value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) Filter Operator predicate: a is not null (type: boolean) - Statistics: Num rows: 9 Data size: 873 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 9 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: a (type: int), b (type: string), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string) + expressions: a (type: int), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 9 Data size: 2637 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 9 Data size: 1908 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 9 Data size: 2637 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string) + Statistics: Num rows: 9 Data size: 1908 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: int), _col2 
(type: int), _col3 (type: bigint), _col4 (type: string), _col5 (type: bigint) Select Operator - expressions: a (type: int), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string), ROW__POSITION (type: bigint) + expressions: a (type: int), b (type: string), c (type: int), PARTITION__SPEC__ID (type: int), PARTITION__HASH (type: bigint), FILE__PATH (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 9 Data size: 1908 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 9 Data size: 2637 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 9 Data size: 1908 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: int), _col2 (type: int), _col3 (type: bigint), _col4 (type: string), _col5 (type: bigint) + Statistics: Num rows: 9 Data size: 2637 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string) Select Operator expressions: a (type: int) outputColumnNames: _col0 @@ -2034,7 +1987,7 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col10 Statistics: Num rows: 10 Data size: 3178 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null)) (type: boolean) + predicate: ((_col7 = 0L) or (_col10 is null and (_col8 >= _col7) and _col0 is not null) or ((_col10 is not null and (_col7 <> 0L)) or ((_col0 is null or (_col8 < _col7)) and null and (_col7 <> 0L) and _col10 is null)) is null) (type: boolean) Statistics: Num rows: 10 Data size: 3178 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col2 (type: int), _col3 (type: int), _col4 (type: bigint), _col5 (type: string), _col6 (type: bigint) diff --git a/iceberg/iceberg-shading/pom.xml b/iceberg/iceberg-shading/pom.xml index bcb1c889efed..aa451b2574ec 100644 --- a/iceberg/iceberg-shading/pom.xml +++ b/iceberg/iceberg-shading/pom.xml @@ -14,7 +14,7 @@ org.apache.hive hive-iceberg - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml 4.0.0 diff --git a/iceberg/patched-iceberg-api/pom.xml b/iceberg/patched-iceberg-api/pom.xml index 81264d3d2d13..35e305ded9f5 100644 --- a/iceberg/patched-iceberg-api/pom.xml +++ b/iceberg/patched-iceberg-api/pom.xml @@ -3,7 +3,7 @@ org.apache.hive hive-iceberg - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml 4.0.0 diff --git a/iceberg/patched-iceberg-core/pom.xml b/iceberg/patched-iceberg-core/pom.xml index 9095ff93a26c..f3dfc5817d92 100644 --- a/iceberg/patched-iceberg-core/pom.xml +++ b/iceberg/patched-iceberg-core/pom.xml @@ -3,7 +3,7 @@ org.apache.hive hive-iceberg - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml 4.0.0 @@ -83,7 +83,6 @@ **/HadoopInputFile.class **/SerializableTable.class - **/PartitionsTable.class diff --git a/iceberg/patched-iceberg-core/src/main/java/org/apache/iceberg/PartitionsTable.java b/iceberg/patched-iceberg-core/src/main/java/org/apache/iceberg/PartitionsTable.java deleted file mode 100644 index 904270353d69..000000000000 --- a/iceberg/patched-iceberg-core/src/main/java/org/apache/iceberg/PartitionsTable.java +++ /dev/null @@ -1,345 +0,0 @@ -/* - * Licensed to the Apache Software Foundation 
(ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.iceberg; - -import com.github.benmanes.caffeine.cache.Caffeine; -import com.github.benmanes.caffeine.cache.LoadingCache; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.List; -import org.apache.iceberg.expressions.ManifestEvaluator; -import org.apache.iceberg.io.CloseableIterable; -import org.apache.iceberg.relocated.com.google.common.annotations.VisibleForTesting; -import org.apache.iceberg.types.Types; -import org.apache.iceberg.util.ParallelIterable; -import org.apache.iceberg.util.PartitionUtil; -import org.apache.iceberg.util.StructLikeMap; - -/** A {@link Table} implementation that exposes a table's partitions as rows. */ -public class PartitionsTable extends BaseMetadataTable { - - private final Schema schema; - - private final boolean unpartitionedTable; - - PartitionsTable(Table table) { - this(table, table.name() + ".partitions"); - } - - PartitionsTable(Table table, String name) { - super(table, name); - - this.schema = - new Schema( - Types.NestedField.required(1, "partition", Partitioning.partitionType(table)), - Types.NestedField.required(4, "spec_id", Types.IntegerType.get()), - Types.NestedField.required( - 2, "record_count", Types.LongType.get(), "Count of records in data files"), - Types.NestedField.required( - 3, "file_count", Types.IntegerType.get(), "Count of data files"), - Types.NestedField.required( - 11, - "total_data_file_size_in_bytes", - Types.LongType.get(), - "Total size in bytes of data files"), - Types.NestedField.required( - 5, - "position_delete_record_count", - Types.LongType.get(), - "Count of records in position delete files"), - Types.NestedField.required( - 6, - "position_delete_file_count", - Types.IntegerType.get(), - "Count of position delete files"), - Types.NestedField.required( - 7, - "equality_delete_record_count", - Types.LongType.get(), - "Count of records in equality delete files"), - Types.NestedField.required( - 8, - "equality_delete_file_count", - Types.IntegerType.get(), - "Count of equality delete files"), - Types.NestedField.optional( - 9, - "last_updated_at", - Types.TimestampType.withZone(), - "Commit time of snapshot that last updated this partition"), - Types.NestedField.optional( - 10, - "last_updated_snapshot_id", - Types.LongType.get(), - "Id of snapshot that last updated this partition")); - this.unpartitionedTable = Partitioning.partitionType(table).fields().isEmpty(); - } - - @Override - public TableScan newScan() { - return new PartitionsScan(table()); - } - - @Override - public Schema schema() { - if (unpartitionedTable) { - return schema.select( - "record_count", - "file_count", - "total_data_file_size_in_bytes", - "position_delete_record_count", - "position_delete_file_count", - "equality_delete_record_count", 
- "equality_delete_file_count", - "last_updated_at", - "last_updated_snapshot_id"); - } - return schema; - } - - @Override - MetadataTableType metadataTableType() { - return MetadataTableType.PARTITIONS; - } - - private DataTask task(StaticTableScan scan) { - Iterable partitions = partitions(table(), scan); - if (unpartitionedTable) { - // the table is unpartitioned, partitions contains only the root partition - return StaticDataTask.of( - io().newInputFile(table().operations().current().metadataFileLocation()), - schema(), - scan.schema(), - partitions, - root -> - StaticDataTask.Row.of( - root.dataRecordCount, - root.dataFileCount, - root.dataFileSizeInBytes, - root.posDeleteRecordCount, - root.posDeleteFileCount, - root.eqDeleteRecordCount, - root.eqDeleteFileCount, - root.lastUpdatedAt, - root.lastUpdatedSnapshotId)); - } else { - return StaticDataTask.of( - io().newInputFile(table().operations().current().metadataFileLocation()), - schema(), - scan.schema(), - partitions, - PartitionsTable::convertPartition); - } - } - - private static StaticDataTask.Row convertPartition(Partition partition) { - return StaticDataTask.Row.of( - partition.partitionData, - partition.specId, - partition.dataRecordCount, - partition.dataFileCount, - partition.dataFileSizeInBytes, - partition.posDeleteRecordCount, - partition.posDeleteFileCount, - partition.eqDeleteRecordCount, - partition.eqDeleteFileCount, - partition.lastUpdatedAt, - partition.lastUpdatedSnapshotId); - } - - private static Iterable partitions(Table table, StaticTableScan scan) { - Types.StructType partitionType = Partitioning.partitionType(table); - PartitionMap partitions = new PartitionMap(partitionType); - try (CloseableIterable>> entries = planEntries(scan)) { - for (ManifestEntry> entry : entries) { - Snapshot snapshot = table.snapshot(entry.snapshotId()); - ContentFile file = entry.file(); - StructLike partition = - PartitionUtil.coercePartition( - partitionType, table.specs().get(file.specId()), file.partition()); - partitions.get(partition).update(file, snapshot); - } - } catch (IOException e) { - throw new UncheckedIOException(e); - } - - return partitions.all(); - } - - @VisibleForTesting - static CloseableIterable> planEntries(StaticTableScan scan) { - Table table = scan.table(); - - CloseableIterable filteredManifests = - filteredManifests(scan, table, scan.snapshot().allManifests(table.io())); - - Iterable>> tasks = - CloseableIterable.transform(filteredManifests, manifest -> readEntries(manifest, scan)); - - return new ParallelIterable<>(tasks, scan.planExecutor()); - } - - private static CloseableIterable> readEntries( - ManifestFile manifest, StaticTableScan scan) { - Table table = scan.table(); - return CloseableIterable.transform( - ManifestFiles.open(manifest, table.io(), table.specs()) - .caseSensitive(scan.isCaseSensitive()) - .select(scanColumns(manifest.content())) // don't select stats columns - .liveEntries(), - t -> - (ManifestEntry>) - // defensive copy of manifest entry without stats columns - t.copyWithoutStats()); - } - - private static List scanColumns(ManifestContent content) { - switch (content) { - case DATA: - return BaseScan.SCAN_COLUMNS; - case DELETES: - return BaseScan.DELETE_SCAN_COLUMNS; - default: - throw new UnsupportedOperationException("Cannot read unknown manifest type: " + content); - } - } - - private static CloseableIterable filteredManifests( - StaticTableScan scan, Table table, List manifestFilesList) { - CloseableIterable manifestFiles = - 
CloseableIterable.withNoopClose(manifestFilesList); - - LoadingCache evalCache = - Caffeine.newBuilder() - .build( - specId -> { - PartitionSpec spec = table.specs().get(specId); - PartitionSpec transformedSpec = transformSpec(scan.tableSchema(), spec); - return ManifestEvaluator.forRowFilter( - scan.filter(), transformedSpec, scan.isCaseSensitive()); - }); - - return CloseableIterable.filter( - manifestFiles, manifest -> evalCache.get(manifest.partitionSpecId()).eval(manifest)); - } - - private class PartitionsScan extends StaticTableScan { - PartitionsScan(Table table) { - super( - table, - PartitionsTable.this.schema(), - MetadataTableType.PARTITIONS, - PartitionsTable.this::task); - } - } - - static class PartitionMap { - private final StructLikeMap partitions; - private final Types.StructType keyType; - - PartitionMap(Types.StructType type) { - this.partitions = StructLikeMap.create(type); - this.keyType = type; - } - - Partition get(StructLike key) { - Partition partition = partitions.get(key); - if (partition == null) { - partition = new Partition(key, keyType); - partitions.put(key, partition); - } - return partition; - } - - Iterable all() { - return partitions.values(); - } - } - - static class Partition { - private final PartitionData partitionData; - private int specId; - private long dataRecordCount; - private int dataFileCount; - private long dataFileSizeInBytes; - private long posDeleteRecordCount; - private int posDeleteFileCount; - private long eqDeleteRecordCount; - private int eqDeleteFileCount; - private Long lastUpdatedAt; - private Long lastUpdatedSnapshotId; - - Partition(StructLike key, Types.StructType keyType) { - this.partitionData = toPartitionData(key, keyType); - this.specId = 0; - this.dataRecordCount = 0L; - this.dataFileCount = 0; - this.dataFileSizeInBytes = 0L; - this.posDeleteRecordCount = 0L; - this.posDeleteFileCount = 0; - this.eqDeleteRecordCount = 0L; - this.eqDeleteFileCount = 0; - } - - void update(ContentFile file, Snapshot snapshot) { - if (snapshot != null) { - long snapshotCommitTime = snapshot.timestampMillis() * 1000; - if (this.lastUpdatedAt == null || snapshotCommitTime > this.lastUpdatedAt) { - this.lastUpdatedAt = snapshotCommitTime; - this.lastUpdatedSnapshotId = snapshot.snapshotId(); - } - } - - switch (file.content()) { - case DATA: - this.dataRecordCount += file.recordCount(); - this.dataFileCount += 1; - this.specId = file.specId(); - this.dataFileSizeInBytes += file.fileSizeInBytes(); - break; - case POSITION_DELETES: - this.posDeleteRecordCount = file.recordCount(); - this.posDeleteFileCount += 1; - this.specId = file.specId(); - break; - case EQUALITY_DELETES: - this.eqDeleteRecordCount = file.recordCount(); - this.eqDeleteFileCount += 1; - this.specId = file.specId(); - break; - default: - throw new UnsupportedOperationException( - "Unsupported file content type: " + file.content()); - } - } - - /** Needed because StructProjection is not serializable */ - private PartitionData toPartitionData(StructLike key, Types.StructType keyType) { - PartitionData data = new PartitionData(keyType); - for (int i = 0; i < keyType.fields().size(); i++) { - Object val = key.get(i, keyType.fields().get(i).type().typeId().javaClass()); - if (val != null) { - data.set(i, val); - } - } - return data; - } - } -} diff --git a/iceberg/pom.xml b/iceberg/pom.xml index 8a2f3e0ee4a6..dc9a842e5430 100644 --- a/iceberg/pom.xml +++ b/iceberg/pom.xml @@ -14,18 +14,18 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml 4.0.0 
hive-iceberg - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT pom Hive Iceberg Modules .. . - 1.4.2 + 1.4.3 4.0.3 3.4.4 1.11.3 diff --git a/itests/custom-serde/pom.xml b/itests/custom-serde/pom.xml index 99715114d4aa..726691455b3c 100644 --- a/itests/custom-serde/pom.xml +++ b/itests/custom-serde/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive-it - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-it-custom-serde diff --git a/itests/custom-udfs/pom.xml b/itests/custom-udfs/pom.xml index 424cab059f27..4046d77ce183 100644 --- a/itests/custom-udfs/pom.xml +++ b/itests/custom-udfs/pom.xml @@ -17,7 +17,7 @@ limitations under the License. org.apache.hive hive-it - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-it-custom-udfs diff --git a/itests/custom-udfs/udf-classloader-udf1/pom.xml b/itests/custom-udfs/udf-classloader-udf1/pom.xml index 591713cef9bf..d868154e551a 100644 --- a/itests/custom-udfs/udf-classloader-udf1/pom.xml +++ b/itests/custom-udfs/udf-classloader-udf1/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive-it-custom-udfs - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml org.apache.hive.hive-it-custom-udfs diff --git a/itests/custom-udfs/udf-classloader-udf2/pom.xml b/itests/custom-udfs/udf-classloader-udf2/pom.xml index d28d3e0190dc..1af79f61ad69 100644 --- a/itests/custom-udfs/udf-classloader-udf2/pom.xml +++ b/itests/custom-udfs/udf-classloader-udf2/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive-it-custom-udfs - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml org.apache.hive.hive-it-custom-udfs diff --git a/itests/custom-udfs/udf-classloader-util/pom.xml b/itests/custom-udfs/udf-classloader-util/pom.xml index aaa9f68fef5b..e68fb6e6c769 100644 --- a/itests/custom-udfs/udf-classloader-util/pom.xml +++ b/itests/custom-udfs/udf-classloader-util/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive-it-custom-udfs - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml org.apache.hive.hive-it-custom-udfs diff --git a/itests/custom-udfs/udf-vectorized-badexample/pom.xml b/itests/custom-udfs/udf-vectorized-badexample/pom.xml index 09ce6c5b1d7a..1e4ec50ccdac 100644 --- a/itests/custom-udfs/udf-vectorized-badexample/pom.xml +++ b/itests/custom-udfs/udf-vectorized-badexample/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive-it-custom-udfs - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml org.apache.hive.hive-it-custom-udfs diff --git a/itests/hcatalog-unit/pom.xml b/itests/hcatalog-unit/pom.xml index 3f47452b7e32..e3941f3f92ac 100644 --- a/itests/hcatalog-unit/pom.xml +++ b/itests/hcatalog-unit/pom.xml @@ -22,7 +22,7 @@ org.apache.hive hive-it - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-hcatalog-it-unit diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/ManyMiniCluster.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/ManyMiniCluster.java index 155735c6072e..8c28c63b62b4 100644 --- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/ManyMiniCluster.java +++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/ManyMiniCluster.java @@ -299,12 +299,12 @@ private void setUpMetastore() throws Exception { //The default org.apache.hadoop.hive.ql.hooks.PreExecutePrinter hook //is present only in the ql/test directory - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); 
hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hiveConf.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, + hiveConf.set(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, "jdbc:derby:" + new File(workDir + "/metastore_db") + ";create=true"); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.toString(), + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.toString(), new File(workDir, "warehouse").toString()); //set where derby logs File derbyLogFile = new File(workDir + "/derby.log"); diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java index fe33f47793fb..acb37344972c 100644 --- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java +++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/hbase/TestPigHBaseStorageHandler.java @@ -75,9 +75,9 @@ public void Initialize() throws Exception { Path whPath = new Path(fsuri.getScheme(), fsuri.getAuthority(), getTestDir()); hcatConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hcatConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hcatConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hcatConf.set(ConfVars.METASTOREWAREHOUSE.varname, whPath.toString()); + hcatConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hcatConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); + hcatConf.set(ConfVars.METASTORE_WAREHOUSE.varname, whPath.toString()); hcatConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java index c093055ecff1..eb093e105143 100644 --- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java +++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/mapreduce/TestSequenceFileReadWrite.java @@ -73,10 +73,10 @@ public void setup() throws Exception { warehouseDir = HCatUtil.makePathASafeFileName(dataDir + File.separator + "warehouse"); inputFileName = HCatUtil.makePathASafeFileName(dataDir + File.separator + "input.data"); hiveConf = new HiveConf(this.getClass()); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, warehouseDir); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, warehouseDir); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/itests/hive-blobstore/pom.xml b/itests/hive-blobstore/pom.xml index b44eba779123..51cccbda4ebd 100644 --- a/itests/hive-blobstore/pom.xml +++ b/itests/hive-blobstore/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive-it - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-blobstore diff --git a/itests/hive-jmh/pom.xml b/itests/hive-jmh/pom.xml index defef3de685a..432a4414cf0f 100644 --- 
a/itests/hive-jmh/pom.xml +++ b/itests/hive-jmh/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive-it - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-jmh diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/mapjoin/load/AbstractHTLoadBench.java b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/mapjoin/load/AbstractHTLoadBench.java index b33c7da545e2..b597bed53849 100644 --- a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/mapjoin/load/AbstractHTLoadBench.java +++ b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/mapjoin/load/AbstractHTLoadBench.java @@ -123,8 +123,8 @@ protected void setupMapJoinHT(HiveConf hiveConf, long seed, int rowCount, TypeInfo[] smallTableValueTypeInfos, int[] smallTableRetainKeyColumnNums, SmallTableGenerationParameters smallTableGenerationParameters) throws Exception { - hiveConf.set(HiveConf.ConfVars.HIVEMAPJOINPARALELHASHTABLETHREADS.varname, LOAD_THREADS_NUM + ""); - LOG.info("Number of threads: " + hiveConf.get(HiveConf.ConfVars.HIVEMAPJOINPARALELHASHTABLETHREADS.varname)); + hiveConf.set(HiveConf.ConfVars.HIVE_MAPJOIN_PARALEL_HASHTABLE_THREADS.varname, LOAD_THREADS_NUM + ""); + LOG.info("Number of threads: " + hiveConf.get(HiveConf.ConfVars.HIVE_MAPJOIN_PARALEL_HASHTABLE_THREADS.varname)); this.rowCount = rowCount; diff --git a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/mapjoin/load/LegacyVectorMapJoinFastHashTableLoader.java b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/mapjoin/load/LegacyVectorMapJoinFastHashTableLoader.java index 7af9380b619d..f292cf09481e 100644 --- a/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/mapjoin/load/LegacyVectorMapJoinFastHashTableLoader.java +++ b/itests/hive-jmh/src/main/java/org/apache/hive/benchmark/vectorization/mapjoin/load/LegacyVectorMapJoinFastHashTableLoader.java @@ -56,7 +56,7 @@ public LegacyVectorMapJoinFastHashTableLoader(TezContext context, Configuration this.hconf = hconf; this.desc = (MapJoinDesc)joinOp.getConf(); this.cacheKey = joinOp.getCacheKey(); - this.htLoadCounter = this.tezContext.getTezProcessorContext().getCounters().findCounter(HiveConf.getVar(hconf, HiveConf.ConfVars.HIVECOUNTERGROUP), hconf.get("__hive.context.name", "")); + this.htLoadCounter = this.tezContext.getTezProcessorContext().getCounters().findCounter(HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_COUNTER_GROUP), hconf.get("__hive.context.name", "")); } @Override @@ -66,7 +66,7 @@ public void init(ExecMapperContext context, MapredContext mrContext, this.hconf = hconf; this.desc = joinOp.getConf(); this.cacheKey = joinOp.getCacheKey(); - String counterGroup = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVECOUNTERGROUP); + String counterGroup = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_COUNTER_GROUP); String vertexName = hconf.get(Operator.CONTEXT_NAME_KEY, ""); String counterName = Utilities.getVertexCounterName(HashTableLoaderCounters.HASHTABLE_LOAD_TIME_MS.name(), vertexName); this.htLoadCounter = tezContext.getTezProcessorContext().getCounters().findCounter(counterGroup, counterName); diff --git a/itests/hive-minikdc/pom.xml b/itests/hive-minikdc/pom.xml index c42e99591c05..827b1e5b2c29 100644 --- a/itests/hive-minikdc/pom.xml +++ b/itests/hive-minikdc/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive-it - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-it-minikdc diff --git 
a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/JdbcWithMiniKdcSQLAuthTest.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/JdbcWithMiniKdcSQLAuthTest.java index fccf3e0209f8..ce5518f0e4a6 100644 --- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/JdbcWithMiniKdcSQLAuthTest.java +++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/JdbcWithMiniKdcSQLAuthTest.java @@ -59,7 +59,7 @@ public static void beforeTestBase(String transportMode) throws Exception { hiveConf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true); hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); hiveConf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); - hiveConf.setBoolVar(ConfVars.HIVEFETCHTASKCACHING, false); + hiveConf.setBoolVar(ConfVars.HIVE_FETCH_TASK_CACHING, false); miniHS2 = MiniHiveKdc.getMiniHS2WithKerb(miniHiveKdc, hiveConf); miniHS2.start(new HashMap()); diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java index 890e4092ea4f..e370810e5e82 100644 --- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java +++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestHs2HooksWithMiniKdc.java @@ -50,12 +50,12 @@ public class TestHs2HooksWithMiniKdc { @BeforeClass public static void setUpBeforeClass() throws Exception { Class.forName(MiniHS2.getJdbcDriverName()); - confOverlay.put(ConfVars.POSTEXECHOOKS.varname, PostExecHook.class.getName()); - confOverlay.put(ConfVars.PREEXECHOOKS.varname, PreExecHook.class.getName()); + confOverlay.put(ConfVars.POST_EXEC_HOOKS.varname, PostExecHook.class.getName()); + confOverlay.put(ConfVars.PRE_EXEC_HOOKS.varname, PreExecHook.class.getName()); confOverlay.put(ConfVars.SEMANTIC_ANALYZER_HOOK.varname, SemanticAnalysisHook.class.getName()); confOverlay.put(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "" + Boolean.FALSE); - confOverlay.put(ConfVars.HIVEFETCHTASKCACHING.varname, "" + false); + confOverlay.put(ConfVars.HIVE_FETCH_TASK_CACHING.varname, "" + false); miniHiveKdc = new MiniHiveKdc(); HiveConf hiveConf = new HiveConf(); diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStore.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStore.java index 58bcac694215..3da7b16a0eaf 100644 --- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStore.java +++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStore.java @@ -48,9 +48,9 @@ public static void beforeTest() throws Exception { String hs2Principal = miniHS2.getConfProperty(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL.varname); String hs2KeyTab = miniHS2.getConfProperty(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB.varname); System.out.println("HS2 principal : " + hs2Principal + " HS2 keytab : " + hs2KeyTab + " Metastore principal : " + metastorePrincipal); - System.setProperty(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.WAREHOUSE)); - System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.CONNECT_URL_KEY)); System.setProperty(ConfVars.METASTORE_USE_THRIFT_SASL.varname, 
String.valueOf(MetastoreConf.getBoolVar(hiveConf, MetastoreConf.ConfVars.USE_THRIFT_SASL))); diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStoreNoDoAs.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStoreNoDoAs.java index 74d8e777597e..f666077de886 100644 --- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStoreNoDoAs.java +++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithDBTokenStoreNoDoAs.java @@ -48,11 +48,11 @@ public static void beforeTest() throws Exception { String hs2Principal = miniHS2.getConfProperty(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL.varname); String hs2KeyTab = miniHS2.getConfProperty(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB.varname); System.out.println("HS2 principal : " + hs2Principal + " HS2 keytab : " + hs2KeyTab + " Metastore principal : " + metastorePrincipal); - System.setProperty(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.WAREHOUSE)); - System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.CONNECT_URL_KEY)); - System.setProperty(HiveConf.ConfVars.METASTOREURIS.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_URIS.varname, MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.THRIFT_URIS)); System.setProperty(ConfVars.METASTORE_USE_THRIFT_SASL.varname, String.valueOf(MetastoreConf.getBoolVar(hiveConf, MetastoreConf.ConfVars.USE_THRIFT_SASL))); diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java index 883d333dd48d..77dabb42bd27 100644 --- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java +++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestJdbcWithMiniKdcCookie.java @@ -81,7 +81,7 @@ public void setUp() throws Exception { hiveConf.setTimeVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE, 1, TimeUnit.SECONDS); hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - hiveConf.setBoolVar(ConfVars.HIVEFETCHTASKCACHING, false); + hiveConf.setBoolVar(ConfVars.HIVE_FETCH_TASK_CACHING, false); miniHS2 = MiniHiveKdc.getMiniHS2WithKerb(miniHiveKdc, hiveConf); miniHS2.start(new HashMap()); diff --git a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestSSLWithMiniKdc.java b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestSSLWithMiniKdc.java index 7ca74efb648b..4ee239a7cbff 100644 --- a/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestSSLWithMiniKdc.java +++ b/itests/hive-minikdc/src/test/java/org/apache/hive/minikdc/TestSSLWithMiniKdc.java @@ -58,7 +58,7 @@ public static void beforeTest() throws Exception { SSLTestUtils.setMetastoreSslConf(hiveConf); hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - hiveConf.setBoolVar(ConfVars.HIVEFETCHTASKCACHING, false); + hiveConf.setBoolVar(ConfVars.HIVE_FETCH_TASK_CACHING, false); setHMSSaslConf(miniHiveKdc, hiveConf); diff --git a/itests/hive-unit-hadoop2/pom.xml b/itests/hive-unit-hadoop2/pom.xml index 0f4ca2d891b7..5ab24c76f3d4 100644 --- a/itests/hive-unit-hadoop2/pom.xml +++ b/itests/hive-unit-hadoop2/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive-it - 4.0.0-beta-2-SNAPSHOT 
+ 4.1.0-SNAPSHOT ../pom.xml hive-it-unit-hadoop2 diff --git a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProviderWithACL.java b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProviderWithACL.java index 61d2e920be7b..5f952a356951 100644 --- a/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProviderWithACL.java +++ b/itests/hive-unit-hadoop2/src/test/java/org/apache/hadoop/hive/ql/security/TestStorageBasedMetastoreAuthorizationProviderWithACL.java @@ -26,7 +26,6 @@ import java.lang.reflect.Method; import java.net.URI; -import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.List; @@ -91,14 +90,14 @@ protected HiveConf createHiveConf() throws Exception { warehouseDir = new Path(new Path(fs.getUri()), "/warehouse"); fs.mkdirs(warehouseDir); - conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehouseDir.toString()); + conf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, warehouseDir.toString()); extWarehouseDir = new Path(new Path(fs.getUri()), "/external"); fs.mkdirs(extWarehouseDir); conf.setVar(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL, extWarehouseDir.toString()); // Set up scratch directory Path scratchDir = new Path(new Path(fs.getUri()), "/scratchdir"); - conf.setVar(HiveConf.ConfVars.SCRATCHDIR, scratchDir.toString()); + conf.setVar(HiveConf.ConfVars.SCRATCH_DIR, scratchDir.toString()); return conf; } diff --git a/itests/hive-unit/pom.xml b/itests/hive-unit/pom.xml index 29ece2167223..a1c9be35bb8c 100644 --- a/itests/hive-unit/pom.xml +++ b/itests/hive-unit/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive-it - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-it-unit @@ -26,7 +26,7 @@ ../.. 1.15.2 - 2.67.0 + 2.70.0 diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/UtilsForTest.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/UtilsForTest.java index b3dfa961a6c3..c25aa1df5262 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/UtilsForTest.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/UtilsForTest.java @@ -39,7 +39,7 @@ public class UtilsForTest { public static void setNewDerbyDbLocation(HiveConf conf, String newloc) { String newDbLoc = System.getProperty("test.tmp.dir") + newloc + "metastore_db"; - conf.setVar(ConfVars.METASTORECONNECTURLKEY, "jdbc:derby:;databaseName=" + newDbLoc + conf.setVar(ConfVars.METASTORE_CONNECT_URL_KEY, "jdbc:derby:;databaseName=" + newDbLoc + ";create=true"); } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java index 7d441b6acc03..6526c8c82835 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/hooks/TestHs2Hooks.java @@ -19,7 +19,6 @@ //The tests here are heavily based on some timing, so there is some chance to fail. 
package org.apache.hadoop.hive.hooks; -import java.io.Serializable; import java.lang.Override; import java.sql.Statement; import java.util.List; @@ -140,14 +139,14 @@ public void postAnalyze(HiveSemanticAnalyzerHookContext context, @BeforeClass public static void setUpBeforeClass() throws Exception { HiveConf hiveConf = new HiveConf(); - hiveConf.setVar(ConfVars.PREEXECHOOKS, + hiveConf.setVar(ConfVars.PRE_EXEC_HOOKS, PreExecHook.class.getName()); - hiveConf.setVar(ConfVars.POSTEXECHOOKS, + hiveConf.setVar(ConfVars.POST_EXEC_HOOKS, PostExecHook.class.getName()); hiveConf.setVar(ConfVars.SEMANTIC_ANALYZER_HOOK, SemanticAnalysisHook.class.getName()); hiveConf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - hiveConf.setBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER, false); + hiveConf.setBoolVar(ConfVars.HIVE_STATS_COL_AUTOGATHER, false); hiveServer2 = new HiveServer2(); hiveServer2.init(hiveConf); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java index c10060f8171d..a0d5bd2c99b3 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/AbstractTestAuthorizationApiAuthorizer.java @@ -59,9 +59,9 @@ protected static void setup() throws Exception { if (isRemoteMetastoreMode) { MetaStoreTestUtils.startMetaStoreWithRetry(hiveConf); } - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); msc = new HiveMetaStoreClient(hiveConf); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java index 9ddad9922652..06ba4dae0c1a 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreAuthorization.java @@ -44,7 +44,7 @@ public class TestMetaStoreAuthorization { public void setup() throws Exception { conf.setBoolVar(HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS, true); - conf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + conf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); conf.setTimeVar(ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, 60, TimeUnit.SECONDS); } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java index 2f7a2601627c..a94d1da8cc1c 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMetrics.java @@ -42,7 +42,7 @@ public class TestMetaStoreMetrics { @BeforeClass public static void before() throws Exception { hiveConf = new HiveConf(TestMetaStoreMetrics.class); - 
hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + hiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); hiveConf.setBoolVar(HiveConf.ConfVars.METASTORE_METRICS, true); hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); hiveConf diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMultipleEncryptionZones.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMultipleEncryptionZones.java index 4dcfb2266a0c..a901d87c75eb 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMultipleEncryptionZones.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreMultipleEncryptionZones.java @@ -84,18 +84,18 @@ public static void setUp() throws Exception { DFSTestUtil.createKey("test_key_cm", miniDFSCluster, conf); DFSTestUtil.createKey("test_key_db", miniDFSCluster, conf); hiveConf = new HiveConf(TestReplChangeManager.class); - hiveConf.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true); + hiveConf.setBoolean(HiveConf.ConfVars.REPL_CM_ENABLED.varname, true); hiveConf.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() - + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal); + + HiveConf.ConfVars.METASTORE_WAREHOUSE.defaultStrVal); cmroot = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmroot"; cmrootFallBack = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmrootFallback"; cmrootEncrypted = "cmrootEncrypted"; - hiveConf.set(HiveConf.ConfVars.REPLCMDIR.varname, cmroot); - hiveConf.set(HiveConf.ConfVars.REPLCMENCRYPTEDDIR.varname, cmrootEncrypted); - hiveConf.set(HiveConf.ConfVars.REPLCMFALLBACKNONENCRYPTEDDIR.varname, cmrootFallBack); + hiveConf.set(HiveConf.ConfVars.REPL_CM_DIR.varname, cmroot); + hiveConf.set(HiveConf.ConfVars.REPL_CM_ENCRYPTED_DIR.varname, cmrootEncrypted); + hiveConf.set(HiveConf.ConfVars.REPL_CM_FALLBACK_NONENCRYPTED_DIR.varname, cmrootFallBack); initReplChangeManager(); try { @@ -1253,14 +1253,14 @@ public void recycleFailureWithDifferentEncryptionZonesForCm() throws Throwable { @Test public void testClearerEncrypted() throws Exception { HiveConf hiveConfCmClearer = new HiveConf(TestReplChangeManager.class); - hiveConfCmClearer.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true); + hiveConfCmClearer.setBoolean(HiveConf.ConfVars.REPL_CM_ENABLED.varname, true); hiveConfCmClearer.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60); - hiveConfCmClearer.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + hiveConfCmClearer.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() - + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal); + + HiveConf.ConfVars.METASTORE_WAREHOUSE.defaultStrVal); String cmrootCmClearer = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmrootClearer"; - hiveConfCmClearer.set(HiveConf.ConfVars.REPLCMDIR.varname, cmrootCmClearer); + hiveConfCmClearer.set(HiveConf.ConfVars.REPL_CM_DIR.varname, cmrootCmClearer); Warehouse warehouseCmClearer = new Warehouse(hiveConfCmClearer); FileSystem cmfs = new Path(cmrootCmClearer).getFileSystem(hiveConfCmClearer); cmfs.mkdirs(warehouseCmClearer.getWhRoot()); @@ -1359,21 +1359,21 @@ public void 
testClearerEncrypted() throws Exception { @Test public void testCmRootAclPermissions() throws Exception { HiveConf hiveConfAclPermissions = new HiveConf(TestReplChangeManager.class); - hiveConfAclPermissions.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true); + hiveConfAclPermissions.setBoolean(HiveConf.ConfVars.REPL_CM_ENABLED.varname, true); hiveConfAclPermissions.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60); - hiveConfAclPermissions.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + hiveConfAclPermissions.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() - + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal); + + HiveConf.ConfVars.METASTORE_WAREHOUSE.defaultStrVal); String cmRootAclPermissions = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmRootAclPermissions"; - hiveConfAclPermissions.set(HiveConf.ConfVars.REPLCMDIR.varname, cmRootAclPermissions); + hiveConfAclPermissions.set(HiveConf.ConfVars.REPL_CM_DIR.varname, cmRootAclPermissions); Warehouse warehouseCmPermissions = new Warehouse(hiveConfAclPermissions); FileSystem cmfs = new Path(cmRootAclPermissions).getFileSystem(hiveConfAclPermissions); cmfs.mkdirs(warehouseCmPermissions.getWhRoot()); FileSystem fsWarehouse = warehouseCmPermissions.getWhRoot().getFileSystem(hiveConfAclPermissions); //change the group of warehouse for testing - Path warehouse = new Path(hiveConfAclPermissions.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname)); + Path warehouse = new Path(hiveConfAclPermissions.get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname)); fsWarehouse.setOwner(warehouse, null, "testgroup"); long now = System.currentTimeMillis(); @@ -1434,7 +1434,7 @@ public void testCmRootAclPermissions() throws Exception { return null; }); - String cmEncrypted = hiveConf.get(HiveConf.ConfVars.REPLCMENCRYPTEDDIR.varname, cmrootEncrypted); + String cmEncrypted = hiveConf.get(HiveConf.ConfVars.REPL_CM_ENCRYPTED_DIR.varname, cmrootEncrypted); AclStatus aclStatus = fsWarehouse.getAclStatus(new Path(dirTbl1 + Path.SEPARATOR + cmEncrypted)); AclStatus aclStatus2 = fsWarehouse.getAclStatus(new Path(dirTbl2 + Path.SEPARATOR + cmEncrypted)); AclStatus aclStatus3 = fsWarehouse.getAclStatus(new Path(dirTbl3 + Path.SEPARATOR + cmEncrypted)); @@ -1501,17 +1501,17 @@ public void testCmRootAclPermissions() throws Exception { @Test public void testCmrootEncrypted() throws Exception { HiveConf encryptedHiveConf = new HiveConf(TestReplChangeManager.class); - encryptedHiveConf.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true); + encryptedHiveConf.setBoolean(HiveConf.ConfVars.REPL_CM_ENABLED.varname, true); encryptedHiveConf.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60); - encryptedHiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + encryptedHiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() - + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal); + + HiveConf.ConfVars.METASTORE_WAREHOUSE.defaultStrVal); String cmrootdirEncrypted = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmrootDirEncrypted"; - encryptedHiveConf.set(HiveConf.ConfVars.REPLCMDIR.varname, cmrootdirEncrypted); + encryptedHiveConf.set(HiveConf.ConfVars.REPL_CM_DIR.varname, cmrootdirEncrypted); FileSystem cmrootdirEncryptedFs = new Path(cmrootdirEncrypted).getFileSystem(hiveConf); cmrootdirEncryptedFs.mkdirs(new Path(cmrootdirEncrypted)); - 
encryptedHiveConf.set(HiveConf.ConfVars.REPLCMFALLBACKNONENCRYPTEDDIR.varname, cmrootFallBack); + encryptedHiveConf.set(HiveConf.ConfVars.REPL_CM_FALLBACK_NONENCRYPTED_DIR.varname, cmrootFallBack); //Create cm in encrypted zone EncryptionZoneUtils.createEncryptionZone(new Path(cmrootdirEncrypted), "test_key_db", conf); @@ -1562,11 +1562,11 @@ public void testCmrootEncrypted() throws Exception { @Test public void testCmrootFallbackEncrypted() throws Exception { HiveConf encryptedHiveConf = new HiveConf(TestReplChangeManager.class); - encryptedHiveConf.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true); + encryptedHiveConf.setBoolean(HiveConf.ConfVars.REPL_CM_ENABLED.varname, true); encryptedHiveConf.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60); - encryptedHiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + encryptedHiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() - + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal); + + HiveConf.ConfVars.METASTORE_WAREHOUSE.defaultStrVal); String cmrootdirEncrypted = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmrootIsEncrypted"; String cmRootFallbackEncrypted = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmrootFallbackEncrypted"; @@ -1574,8 +1574,8 @@ public void testCmrootFallbackEncrypted() throws Exception { try { cmrootdirEncryptedFs.mkdirs(new Path(cmrootdirEncrypted)); cmrootdirEncryptedFs.mkdirs(new Path(cmRootFallbackEncrypted)); - encryptedHiveConf.set(HiveConf.ConfVars.REPLCMDIR.varname, cmrootdirEncrypted); - encryptedHiveConf.set(HiveConf.ConfVars.REPLCMFALLBACKNONENCRYPTEDDIR.varname, cmRootFallbackEncrypted); + encryptedHiveConf.set(HiveConf.ConfVars.REPL_CM_DIR.varname, cmrootdirEncrypted); + encryptedHiveConf.set(HiveConf.ConfVars.REPL_CM_FALLBACK_NONENCRYPTED_DIR.varname, cmRootFallbackEncrypted); //Create cm in encrypted zone EncryptionZoneUtils.createEncryptionZone(new Path(cmrootdirEncrypted), "test_key_db", conf); @@ -1600,19 +1600,19 @@ public void testCmrootFallbackEncrypted() throws Exception { @Test public void testCmrootFallbackRelative() throws Exception { HiveConf encryptedHiveConf = new HiveConf(TestReplChangeManager.class); - encryptedHiveConf.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true); + encryptedHiveConf.setBoolean(HiveConf.ConfVars.REPL_CM_ENABLED.varname, true); encryptedHiveConf.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60); - encryptedHiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, + encryptedHiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() - + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal); + + HiveConf.ConfVars.METASTORE_WAREHOUSE.defaultStrVal); String cmrootdirEncrypted = "hdfs://" + miniDFSCluster.getNameNode().getHostAndPort() + "/cmrootIsEncrypted"; String cmRootFallbackEncrypted = "cmrootFallbackEncrypted"; FileSystem cmrootdirEncryptedFs = new Path(cmrootdirEncrypted).getFileSystem(encryptedHiveConf); try { cmrootdirEncryptedFs.mkdirs(new Path(cmrootdirEncrypted)); cmrootdirEncryptedFs.mkdirs(new Path(cmRootFallbackEncrypted)); - encryptedHiveConf.set(HiveConf.ConfVars.REPLCMDIR.varname, cmrootdirEncrypted); - encryptedHiveConf.set(HiveConf.ConfVars.REPLCMFALLBACKNONENCRYPTEDDIR.varname, cmRootFallbackEncrypted); + encryptedHiveConf.set(HiveConf.ConfVars.REPL_CM_DIR.varname, cmrootdirEncrypted); + 
encryptedHiveConf.set(HiveConf.ConfVars.REPL_CM_FALLBACK_NONENCRYPTED_DIR.varname, cmRootFallbackEncrypted); //Create cm in encrypted zone EncryptionZoneUtils.createEncryptionZone(new Path(cmrootdirEncrypted), "test_key_db", conf); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java index 981f5fb4c211..69db7180cfff 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestMetastoreVersion.java @@ -70,7 +70,7 @@ public void setUp() throws Exception { DummyPreListener.class.getName()); testMetastoreDB = System.getProperty("java.io.tmpdir") + File.separator + "test_metastore-" + System.currentTimeMillis(); - System.setProperty(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, + System.setProperty(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, "jdbc:derby:" + testMetastoreDB + ";create=true"); metastoreSchemaInfo = MetaStoreSchemaInfoFactory.get(hiveConf, System.getProperty("test.tmp.dir", "target/tmp"), "derby"); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java index ebac38d10944..78304634c4ed 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestReplChangeManager.java @@ -97,11 +97,11 @@ private static void internalSetUpProvidePerm() throws Exception { configuration.set("dfs.client.use.datanode.hostname", "true"); permDdfs = new MiniDFSCluster.Builder(configuration).numDataNodes(2).format(true).build(); permhiveConf = new HiveConf(TestReplChangeManager.class); - permhiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, - "hdfs://" + permDdfs.getNameNode().getHostAndPort() + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal); - permhiveConf.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true); + permhiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, + "hdfs://" + permDdfs.getNameNode().getHostAndPort() + HiveConf.ConfVars.METASTORE_WAREHOUSE.defaultStrVal); + permhiveConf.setBoolean(HiveConf.ConfVars.REPL_CM_ENABLED.varname, true); permCmroot = "hdfs://" + permDdfs.getNameNode().getHostAndPort() + "/cmroot"; - permhiveConf.set(HiveConf.ConfVars.REPLCMDIR.varname, permCmroot); + permhiveConf.set(HiveConf.ConfVars.REPL_CM_DIR.varname, permCmroot); permhiveConf.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60); permWarehouse = new Warehouse(permhiveConf); } @@ -109,11 +109,11 @@ private static void internalSetUpProvidePerm() throws Exception { private static void internalSetUp() throws Exception { m_dfs = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(2).format(true).build(); hiveConf = new HiveConf(TestReplChangeManager.class); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, - "hdfs://" + m_dfs.getNameNode().getHostAndPort() + HiveConf.ConfVars.METASTOREWAREHOUSE.defaultStrVal); - hiveConf.setBoolean(HiveConf.ConfVars.REPLCMENABLED.varname, true); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, + "hdfs://" + m_dfs.getNameNode().getHostAndPort() + HiveConf.ConfVars.METASTORE_WAREHOUSE.defaultStrVal); + hiveConf.setBoolean(HiveConf.ConfVars.REPL_CM_ENABLED.varname, true); cmroot = "hdfs://" + 
m_dfs.getNameNode().getHostAndPort() + "/cmroot"; - hiveConf.set(HiveConf.ConfVars.REPLCMDIR.varname, cmroot); + hiveConf.set(HiveConf.ConfVars.REPL_CM_DIR.varname, cmroot); hiveConf.setInt(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, 60); warehouse = new Warehouse(hiveConf); fs = new Path(cmroot).getFileSystem(hiveConf); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestServerSpecificConfig.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestServerSpecificConfig.java index 17542f177c1e..e2fbf0450270 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestServerSpecificConfig.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestServerSpecificConfig.java @@ -181,7 +181,7 @@ private void setHiveSiteWithRemoteMetastore() throws IOException { FileOutputStream out = new FileOutputStream(hiveSite); HiveConf.setHiveSiteLocation(oldDefaultHiveSite); HiveConf defaultHiveConf = new HiveConf(); - defaultHiveConf.setVar(ConfVars.METASTOREURIS, "dummyvalue"); + defaultHiveConf.setVar(ConfVars.METASTORE_URIS, "dummyvalue"); // reset to the hive-site.xml values for following param defaultHiveConf.set("hive.dummyparam.test.server.specific.config.override", "from.hive-site.xml"); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java index 04917d22a8c4..2db61aa6c06a 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStoreUpdateUsingEvents.java @@ -585,7 +585,7 @@ private void updateTableColStats(String dbName, String tblName, String[] colName colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, highValue, avgColLen)); colStats.setEngine(Constants.HIVE_ENGINE); - SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); + SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats)); setTblColStat.setWriteId(writeId); setTblColStat.setValidWriteIdList(validWriteIds); @@ -627,7 +627,7 @@ private void updatePartColStats(String dbName, String tblName, boolean isTxnTabl colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, highValue, avgColLen)); colStats.setEngine(Constants.HIVE_ENGINE); - SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); + SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats)); setTblColStat.setWriteId(writeId); setTblColStat.setValidWriteIdList(validWriteIds); @@ -940,7 +940,7 @@ public void testTableColumnStatisticsTxnTableMultiAbort() throws Throwable { colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, highValue, avgColLen)); colStats.setEngine(Constants.HIVE_ENGINE); - SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); + SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats)); setTblColStat.setWriteId(writeId); setTblColStat.setValidWriteIdList(validWriteIds); @@ -1007,7 +1007,7 @@ public void 
testTableColumnStatisticsTxnTableOpenTxn() throws Throwable { colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, highValue, avgColLen)); colStats.setEngine(Constants.HIVE_ENGINE); - SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); + SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats)); setTblColStat.setWriteId(writeId); setTblColStat.setValidWriteIdList(validWriteIds); @@ -1056,7 +1056,7 @@ private void verifyAggrStat(String dbName, String tblName, String[] colName, Lis // This will update the cache for non txn table. PartitionsStatsRequest request = new PartitionsStatsRequest(dbName, tblName, - Collections.singletonList(colName[0]), partitions, Constants.HIVE_ENGINE); + Collections.singletonList(colName[0]), partitions); request.setCatName(DEFAULT_CATALOG_NAME); request.setValidWriteIdList(validWriteIds); AggrStats aggrStatsCached = hmsHandler.get_aggr_stats_for(request); @@ -1123,7 +1123,7 @@ public void testAggrStatTxnTable() throws Throwable { colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, 5, 20)); colStats.setEngine(Constants.HIVE_ENGINE); - SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); + SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats)); setTblColStat.setWriteId(writeId); setTblColStat.setValidWriteIdList(validWriteIds); hmsHandler.update_partition_column_statistics_req(setTblColStat); @@ -1136,7 +1136,7 @@ public void testAggrStatTxnTable() throws Throwable { // keep the txn open and verify that the stats got is not compliant. PartitionsStatsRequest request = new PartitionsStatsRequest(dbName, tblName, - Collections.singletonList(colName[0]), partitions, Constants.HIVE_ENGINE); + Collections.singletonList(colName[0]), partitions); request.setCatName(DEFAULT_CATALOG_NAME); request.setValidWriteIdList(validWriteIds); AggrStats aggrStatsCached = hmsHandler.get_aggr_stats_for(request); @@ -1175,7 +1175,7 @@ public void testAggrStatAbortTxn() throws Throwable { colStats.setStatsObj(getStatsObjects(dbName, tblName, colName, 5, 20)); colStats.setEngine(Constants.HIVE_ENGINE); - SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); + SetPartitionsStatsRequest setTblColStat = new SetPartitionsStatsRequest(Collections.singletonList(colStats)); setTblColStat.setWriteId(writeId); setTblColStat.setValidWriteIdList(validWriteIds); hmsHandler.update_partition_column_statistics_req(setTblColStat); @@ -1191,7 +1191,7 @@ public void testAggrStatAbortTxn() throws Throwable { // keep the txn open and verify that the stats got is not compliant. 
PartitionsStatsRequest request = new PartitionsStatsRequest(dbName, tblName, - Collections.singletonList(colName[0]), partitions, Constants.HIVE_ENGINE); + Collections.singletonList(colName[0]), partitions); request.setCatName(DEFAULT_CATALOG_NAME); request.setValidWriteIdList(validWriteIds); AggrStats aggrStatsCached = hmsHandler.get_aggr_stats_for(request); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/TestHiveMetaTool.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/TestHiveMetaTool.java index af4f4bb36196..def816c41d66 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/TestHiveMetaTool.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/TestHiveMetaTool.java @@ -20,7 +20,6 @@ import java.io.ByteArrayOutputStream; import java.io.File; -import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; import java.nio.file.Files; @@ -61,8 +60,7 @@ import org.junit.Assert; import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertEquals; -import com.google.gson.JsonParser; -import org.json.JSONObject; + import org.junit.Before; import org.junit.After; import org.junit.Test; @@ -110,15 +108,15 @@ public void setUp() throws Exception { + File.separator + "mapred" + File.separator + "staging"); hiveConf.set("mapred.temp.dir", workDir + File.separator + this.getClass().getSimpleName() + File.separator + "mapred" + File.separator + "temp"); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, getWarehouseDir()); - hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, getWarehouseDir()); + hiveConf.setVar(HiveConf.ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); hiveConf.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, true); HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.SPLIT_UPDATE, true); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, false); hiveConf.setBoolean("mapred.input.dir.recursive", true); TestTxnDbUtil.setConfValues(hiveConf); txnHandler = TxnUtils.getTxnStore(hiveConf); @@ -184,7 +182,7 @@ private void createTable() throws Exception { @Test public void testListFSRoot() throws Exception { - HiveMetaTool.main(new String[] {"-listFSRoot"}); + HiveMetaTool.execute(new String[] {"-listFSRoot"}); String out = os.toString(); assertTrue(out + " doesn't contain " + client.getDatabase(DB_NAME).getLocationUri(), out.contains(client.getDatabase(DB_NAME).getLocationUri())); @@ -192,7 +190,7 @@ public void testListFSRoot() throws Exception { @Test public void testExecuteJDOQL() throws Exception { - HiveMetaTool.main( + HiveMetaTool.execute( new String[] {"-executeJDOQL", "select locationUri from org.apache.hadoop.hive.metastore.model.MDatabase"}); String out = os.toString(); assertTrue(out + " doesn't contain " + client.getDatabase(DB_NAME).getLocationUri(), @@ -203,10 +201,10 @@ 
public void testExecuteJDOQL() throws Exception { public void testUpdateFSRootLocation() throws Exception { checkAvroSchemaURLProps(AVRO_URI); - HiveMetaTool.main(new String[] {"-updateLocation", NEW_LOCATION, LOCATION, "-tablePropKey", "avro.schema.url"}); + HiveMetaTool.execute(new String[] {"-updateLocation", NEW_LOCATION, LOCATION, "-tablePropKey", "avro.schema.url"}); checkAvroSchemaURLProps(NEW_AVRO_URI); - HiveMetaTool.main(new String[] {"-updateLocation", LOCATION, NEW_LOCATION, "-tablePropKey", "avro.schema.url"}); + HiveMetaTool.execute(new String[] {"-updateLocation", LOCATION, NEW_LOCATION, "-tablePropKey", "avro.schema.url"}); checkAvroSchemaURLProps(AVRO_URI); } @@ -389,7 +387,7 @@ private String getAbsolutePath(String extTblLocation) { return "file:" + extTblLocation; } - private JSONObject getListExtTblLocs(String dbName, String outLocation) throws IOException { + private JSONObject getListExtTblLocs(String dbName, String outLocation) throws Exception { File f = new File(outLocation); if (f.exists()) { FileUtil.fullyDelete(f); @@ -397,7 +395,7 @@ private JSONObject getListExtTblLocs(String dbName, String outLocation) throws I if (!(new File(outLocation).mkdirs())) { throw new RuntimeException("Could not create " + outLocation); } - HiveMetaTool.main(new String[] {"-listExtTblLocs", dbName, outLocation}); + HiveMetaTool.execute(new String[] {"-listExtTblLocs", dbName, outLocation}); for (File outFile : f.listFiles()) { String contents = new String(Files.readAllBytes(Paths.get(outFile.getAbsolutePath()))); return new JSONObject(contents); @@ -405,7 +403,7 @@ private JSONObject getListExtTblLocs(String dbName, String outLocation) throws I return null; } - private JSONObject getDiffExtTblLocs(String fileLoc1, String fileLoc2, String outLocation) throws IOException { + private JSONObject getDiffExtTblLocs(String fileLoc1, String fileLoc2, String outLocation) throws Exception { File f = new File(outLocation); if (f.exists()) { FileUtil.fullyDelete(f); @@ -417,7 +415,7 @@ private JSONObject getDiffExtTblLocs(String fileLoc1, String fileLoc2, String ou File f2 = new File(fileLoc2); for (File outFile1 : f1.listFiles()) { for (File outFile2 : f2.listFiles()) { - HiveMetaTool.main(new String[] {"-diffExtTblLocs", outFile1.getAbsolutePath(), outFile2.getAbsolutePath(), outLocation}); + HiveMetaTool.execute(new String[] {"-diffExtTblLocs", outFile1.getAbsolutePath(), outFile2.getAbsolutePath(), outLocation}); for(File outFile : f.listFiles()) { String contents = new String(Files.readAllBytes(Paths.get(outFile.getAbsolutePath()))); return new JSONObject(contents); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java index 36ba35f2aea0..245fc156512c 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestAcidOnTez.java @@ -113,12 +113,12 @@ public String toString() { @Before public void setUp() throws Exception { hiveConf = new HiveConf(this.getClass()); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, TEST_WAREHOUSE_DIR); 
hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false); - hiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); - hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); + hiveConf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_INITIATOR_ON, true); @@ -344,7 +344,7 @@ public void testNonStandardConversion01() throws Exception { * data files in directly. * * Actually Insert Into ... select ... union all ... with - * HIVE_OPTIMIZE_UNION_REMOVE (and HIVEFETCHTASKCONVERSION="none"?) will create subdirs + * HIVE_OPTIMIZE_UNION_REMOVE (and HIVE_FETCH_TASK_CONVERSION="none"?) will create subdirs * but if writing to non acid table there is a merge task on MR (but not on Tez) */ @Ignore("HIVE-17214")//this consistently works locally but never in ptest.... @@ -757,7 +757,7 @@ public void testGetSplitsLocks() throws Exception { HiveConf modConf = new HiveConf(hiveConf); setupTez(modConf); modConf.setVar(ConfVars.HIVE_EXECUTION_ENGINE, "tez"); - modConf.setVar(ConfVars.HIVEFETCHTASKCONVERSION, "more"); + modConf.setVar(ConfVars.HIVE_FETCH_TASK_CONVERSION, "more"); modConf.setVar(HiveConf.ConfVars.LLAP_DAEMON_SERVICE_HOSTS, "localhost"); // SessionState/Driver needs to be restarted with the Tez conf settings. @@ -818,7 +818,7 @@ public void testGetSplitsLocksWithMaterializedView() throws Exception { HiveConf modConf = new HiveConf(hiveConf); setupTez(modConf); modConf.setVar(ConfVars.HIVE_EXECUTION_ENGINE, "tez"); - modConf.setVar(ConfVars.HIVEFETCHTASKCONVERSION, "more"); + modConf.setVar(ConfVars.HIVE_FETCH_TASK_CONVERSION, "more"); modConf.setVar(HiveConf.ConfVars.LLAP_DAEMON_SERVICE_HOSTS, "localhost"); // SessionState/Driver needs to be restarted with the Tez conf settings. 
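Illustrative aside, not part of the patch: the hunks above and below mechanically swap old HiveConf.ConfVars identifiers for underscore-separated ones (HIVEFETCHTASKCONVERSION -> HIVE_FETCH_TASK_CONVERSION, HIVECONVERTJOIN -> HIVE_CONVERT_JOIN, PREEXECHOOKS -> PRE_EXEC_HOOKS, and so on). A minimal sketch of what a caller looks like after the rename, assuming — as the unchanged .varname string literals elsewhere in this patch suggest — that only the Java constant names change while the underlying property keys stay the same; the class and method below are hypothetical, not code from this patch:

    import org.apache.hadoop.hive.conf.HiveConf;

    class ConfVarsRenameSketch {                                      // hypothetical example class
      static HiveConf sketch() {
        HiveConf conf = new HiveConf();
        // before this patch: conf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none");
        conf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none");
        // before this patch: conf.setBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN, true);
        conf.setBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN, true);
        // before this patch: conf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
        conf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, "");      // property key ("hive.exec.pre.hooks") unchanged
        return conf;
      }
    }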
@@ -868,7 +868,7 @@ public void testCrudMajorCompactionSplitGrouper() throws Exception { // make a clone of existing hive conf HiveConf confForTez = new HiveConf(hiveConf); setupTez(confForTez); // one-time setup to make query able to run with Tez - HiveConf.setVar(confForTez, HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + HiveConf.setVar(confForTez, HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); runStatementOnDriver("create transactional table " + tblName + " (a int, b int) clustered by (a) into 2 buckets " + "stored as ORC TBLPROPERTIES('bucketing_version'='2', 'transactional'='true'," + " 'transactional_properties'='default')", confForTez); @@ -1020,9 +1020,9 @@ public static void setupTez(HiveConf conf) { } private void setupMapJoin(HiveConf conf) { - conf.setBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN, true); - conf.setBoolVar(HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASK, true); - conf.setLongVar(HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD, 100000); + conf.setBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN, true); + conf.setBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONALTASK, true); + conf.setLongVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD, 100000); } private List runStatementOnDriver(String stmt) throws Exception { diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestConstraintsMerge.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestConstraintsMerge.java index 12b626d09d3d..1e1a8d74ae1a 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestConstraintsMerge.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestConstraintsMerge.java @@ -28,9 +28,7 @@ import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.tez.mapreduce.hadoop.MRJobConfig; import org.junit.After; -import org.junit.Assert; import org.junit.Before; -import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TestName; @@ -73,12 +71,12 @@ public String toString() { @Before public void setUp() throws Exception { hiveConf = new HiveConf(this.getClass()); - hiveConf.set(ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); + hiveConf.set(ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(ConfVars.POST_EXEC_HOOKS.varname, ""); + hiveConf.set(ConfVars.METASTORE_WAREHOUSE.varname, TEST_WAREHOUSE_DIR); hiveConf.setBoolVar(ConfVars.HIVE_VECTORIZATION_ENABLED, false); - hiveConf.setVar(ConfVars.HIVEMAPREDMODE, "nonstrict"); - hiveConf.setVar(ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); + hiveConf.setVar(ConfVars.HIVE_MAPRED_MODE, "nonstrict"); + hiveConf.setVar(ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); hiveConf .setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java index 7e54dde6f926..b22a3c0f3fde 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDDLWithRemoteMetastoreSecondNamenode.java @@ -83,10 +83,10 @@ public void setUp() throws Exception { // Test with remote 
metastore service int port = MetaStoreTestUtils.startMetaStoreWithRetry(); - conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - conf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + conf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + port); + conf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); conf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, new URI(tmppath + "/warehouse").getPath()); + conf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, new URI(tmppath + "/warehouse").getPath()); // Initialize second mocked filesystem (implement only necessary stuff) // Physical files are resides in local file system in the similar location diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDatabaseTableDefault.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDatabaseTableDefault.java index 041be2d063f7..f5cbd1636962 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDatabaseTableDefault.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestDatabaseTableDefault.java @@ -23,7 +23,6 @@ import java.io.File; import java.util.ArrayList; import java.util.List; -import java.util.Set; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.FileUtils; @@ -36,9 +35,7 @@ import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.metastore.api.Table; import org.junit.After; -import org.junit.Assert; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -93,7 +90,7 @@ public void setUp() throws Exception { HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.CREATE_TABLES_AS_ACID, true); HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_CREATE_TABLES_AS_INSERT_ONLY, true); HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true); - hiveConf.set(HiveConf.ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT.varname, "ORC"); + hiveConf.set(HiveConf.ConfVars.HIVE_DEFAULT_MANAGED_FILEFORMAT.varname, "ORC"); hiveConf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, DbTxnManager.class.getName()); hiveConf.set(HiveConf.ConfVars.METASTORE_CLIENT_CAPABILITIES.varname, "HIVEFULLACIDREAD,HIVEFULLACIDWRITE,HIVECACHEINVALIDATE,HIVEMANAGESTATS,HIVEMANAGEDINSERTWRITE,HIVEMANAGEDINSERTREAD"); TestTxnDbUtil.setConfValues(hiveConf); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java index 05a104f53ea5..470b5ccdfd0e 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/TestMetaStoreLimitPartitionRequest.java @@ -33,7 +33,6 @@ import java.util.Set; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.HMSHandler; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hive.jdbc.miniHS2.MiniHS2; @@ -72,7 +71,7 @@ public static void beforeTest() throws Exception { conf.setIntVar(HiveConf.ConfVars.METASTORE_LIMIT_PARTITION_REQUEST, PARTITION_REQUEST_LIMIT); conf.setBoolVar(HiveConf.ConfVars.METASTORE_INTEGER_JDO_PUSHDOWN, true); conf.setBoolVar(HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL, true); - 
conf.setBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING, true); + conf.setBoolVar(HiveConf.ConfVars.DYNAMIC_PARTITIONING, true); conf.setBoolVar(HiveConf.ConfVars.HIVE_CBO_ENABLED, false); miniHS2 = new MiniHS2.Builder().withConf(conf).build(); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java index 127de2301b16..6ae8239c667b 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/history/TestHiveHistory.java @@ -188,7 +188,7 @@ public void testQueryloglocParentDirNotExist() throws Exception { try { String actualDir = parentTmpDir + "/test"; HiveConf conf = new HiveConf(SessionState.class); - conf.set(HiveConf.ConfVars.HIVEHISTORYFILELOC.toString(), actualDir); + conf.set(HiveConf.ConfVars.HIVE_HISTORY_FILE_LOC.toString(), actualDir); SessionState ss = new CliSessionState(conf); HiveHistory hiveHistory = new HiveHistoryImpl(ss); Path actualPath = new Path(actualDir); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplWithReadOnlyHook.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplWithReadOnlyHook.java index 379c53bb2f7d..f81f99d2c8ea 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplWithReadOnlyHook.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplWithReadOnlyHook.java @@ -22,7 +22,6 @@ import static org.apache.hadoop.hive.common.repl.ReplConst.READ_ONLY_HOOK; import static org.junit.Assert.assertEquals; -import com.google.common.collect.ImmutableMap; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -66,7 +65,7 @@ public static void classLevelSetup() throws Exception { acidEnableConf.put(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET.varname, "false"); acidEnableConf.put(HiveConf.ConfVars.REPL_RETAIN_CUSTOM_LOCATIONS_FOR_DB_ON_TARGET.varname, "false"); - acidEnableConf.put(HiveConf.ConfVars.PREEXECHOOKS.varname, READ_ONLY_HOOK); + acidEnableConf.put(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, READ_ONLY_HOOK); acidEnableConf.putAll(overrides); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java index 731eb9c6bd73..92879d5ebba3 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOnHDFSEncryptedZones.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hive.shims.Utils; import org.apache.hadoop.security.UserGroupInformation; import org.junit.AfterClass; -import org.junit.Assert; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Rule; @@ -119,7 +118,7 @@ public void targetAndSourceHaveDifferentEncryptionZoneKeys() throws Throwable { put(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "false"); put(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname, UserGroupInformation.getCurrentUser().getUserName()); - put(HiveConf.ConfVars.REPLDIR.varname, primary.repldDir); + put(HiveConf.ConfVars.REPL_DIR.varname, primary.repldDir); }}, "test_key123"); //read should pass without raw-byte distcp @@ 
-162,7 +161,7 @@ public void targetAndSourceHaveSameEncryptionZoneKeys() throws Throwable { put(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS.varname, "false"); put(HiveConf.ConfVars.HIVE_DISTCP_DOAS_USER.varname, UserGroupInformation.getCurrentUser().getUserName()); - put(HiveConf.ConfVars.REPLDIR.varname, primary.repldDir); + put(HiveConf.ConfVars.REPL_DIR.varname, primary.repldDir); }}, "test_key"); List dumpWithClause = Arrays.asList( diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOptimisedBootstrap.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOptimisedBootstrap.java index 91f8f76e1ff5..0d3178e8619b 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOptimisedBootstrap.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationOptimisedBootstrap.java @@ -42,7 +42,6 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; import org.apache.hadoop.hive.ql.parse.repl.metric.event.ReplicationMetric; import org.apache.hadoop.hive.ql.parse.repl.metric.event.Stage; import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metric; @@ -54,7 +53,6 @@ import org.junit.BeforeClass; import org.junit.Test; -import javax.annotation.Nullable; import java.io.File; import java.util.ArrayList; import java.util.Arrays; @@ -68,11 +66,6 @@ import static org.apache.hadoop.hive.common.repl.ReplConst.REPL_ENABLE_BACKGROUND_THREAD; import static org.apache.hadoop.hive.common.repl.ReplConst.REPL_TARGET_DB_PROPERTY; import static org.apache.hadoop.hive.common.repl.ReplConst.REPL_FAILOVER_ENDPOINT; -import static org.apache.hadoop.hive.common.repl.ReplConst.REPL_METRICS_FAILBACK_COUNT; -import static org.apache.hadoop.hive.common.repl.ReplConst.REPL_METRICS_FAILOVER_COUNT; -import static org.apache.hadoop.hive.common.repl.ReplConst.REPL_METRICS_LAST_FAILBACK_ENDTIME; -import static org.apache.hadoop.hive.common.repl.ReplConst.REPL_METRICS_LAST_FAILBACK_STARTTIME; -import static org.apache.hadoop.hive.common.repl.ReplConst.REPL_METRICS_LAST_FAILOVER_TYPE; import static org.apache.hadoop.hive.common.repl.ReplConst.TARGET_OF_REPLICATION; import static org.apache.hadoop.hive.metastore.ReplChangeManager.SOURCE_OF_REPLICATION; import static org.apache.hadoop.hive.ql.exec.repl.OptimisedBootstrapUtils.EVENT_ACK_FILE; @@ -137,7 +130,7 @@ public void tearDown() throws Throwable { @Test public void testBuildTableDiffGeneration() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Create two external & two managed tables and do a bootstrap dump & load. 
WarehouseInstance.Tuple tuple = primary.run("use " + primaryDbName) .run("create external table t1 (id int)") @@ -202,7 +195,7 @@ public void testBuildTableDiffGeneration() throws Throwable { Path newReplDir = new Path(replica.repldDir + "1"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Do a reverse dump tuple = replica.dump(replicatedDbName, withClause); @@ -263,7 +256,7 @@ public void testEmptyDiffForControlFailover() throws Throwable { // In case of control failover both A & B will be in sync, so the table diff should be created empty, without any // error. List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Do a bootstrap cycle(A->B) primary.dump(primaryDbName, withClause); @@ -293,7 +286,7 @@ public void testEmptyDiffForControlFailover() throws Throwable { Path newReplDir = new Path(replica.repldDir + "rev"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Do a reverse dump tuple = replica.dump(replicatedDbName, withClause); @@ -317,7 +310,7 @@ public void testEmptyDiffForControlFailover() throws Throwable { @Test public void testFirstIncrementalMandatory() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Create one external and one managed tables and do a bootstrap dump. WarehouseInstance.Tuple tuple = primary.run("use " + primaryDbName) .run("create external table t1 (id int)") @@ -343,7 +336,7 @@ public void testFirstIncrementalMandatory() throws Throwable { Path newReplDir = new Path(replica.repldDir + "1"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Do a dump on cluster B, it should throw an exception, since the first incremental isn't done yet. try { @@ -355,13 +348,13 @@ public void testFirstIncrementalMandatory() throws Throwable { // Do a incremental cycle and check we don't get this exception. withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); primary.dump(primaryDbName, withClause); replica.load(replicatedDbName, primaryDbName, withClause); // Retrigger reverse dump, this time it should be successful and event ack should get created. 
withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); tuple = replica.dump(replicatedDbName, withClause); @@ -373,7 +366,7 @@ public void testFirstIncrementalMandatory() throws Throwable { @Test public void testFailureCasesInTableDiffGeneration() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Do a bootstrap cycle(A->B) primary.dump(primaryDbName, withClause); @@ -418,7 +411,7 @@ public void testFailureCasesInTableDiffGeneration() throws Throwable { Path newReplDir = new Path(replica.repldDir + "reverse"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Trigger dump on target cluster. @@ -505,7 +498,7 @@ public void testFailureCasesInTableDiffGeneration() throws Throwable { @Test public void testReverseReplicationFailureWhenSourceDbIsDropped() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Do a bootstrap cycle. primary.dump(primaryDbName, withClause); @@ -542,7 +535,7 @@ public void testReverseReplicationFailureWhenSourceDbIsDropped() throws Throwabl Path newReplDir = new Path(replica.repldDir + "1"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Do a reverse dump, this should create event_ack file tuple = replica.dump(replicatedDbName, withClause); @@ -800,7 +793,7 @@ public void testReverseBootstrapWithFailedIncremental() throws Throwable { @Test public void testOverwriteDuringBootstrap() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Do a bootstrap cycle. 
primary.dump(primaryDbName, withClause); @@ -862,7 +855,7 @@ public void testOverwriteDuringBootstrap() throws Throwable { Path newReplDir = new Path(replica.repldDir + "1"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Do a reverse dump tuple = replica.dump(replicatedDbName, withClause); @@ -928,7 +921,7 @@ public void testOverwriteDuringBootstrap() throws Throwable { @Test public void testTblMetricRegisterDuringSecondCycleOfOptimizedBootstrap() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(false); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); WarehouseInstance.Tuple tuple = primary.run("use " + primaryDbName) .run("create table t1_managed (id int) clustered by(id) into 3 buckets stored as orc " + "tblproperties (\"transactional\"=\"true\")") @@ -968,7 +961,7 @@ public void testTblMetricRegisterDuringSecondCycleOfOptimizedBootstrap() throws Path newReplDir = new Path(replica.repldDir + "1"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(false); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Do a reverse dump @@ -1004,7 +997,7 @@ public void testTblMetricRegisterDuringSecondCycleOfOptimizedBootstrap() throws @Test public void testTblMetricRegisterDuringSecondLoadCycleOfOptimizedBootstrap() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(false); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); WarehouseInstance.Tuple tuple = primary.run("use " + primaryDbName) .run("create table t1_managed (id int) clustered by(id) into 3 buckets stored as orc " + "tblproperties (\"transactional\"=\"true\")") @@ -1044,7 +1037,7 @@ public void testTblMetricRegisterDuringSecondLoadCycleOfOptimizedBootstrap() thr Path newReplDir = new Path(replica.repldDir + "1"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(false); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Do a reverse dump @@ -1085,7 +1078,7 @@ public void testTblMetricRegisterDuringSecondLoadCycleOfOptimizedBootstrap() thr @NotNull private List setUpFirstIterForOptimisedBootstrap() throws Throwable { List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Do a bootstrap cycle. 
primary.dump(primaryDbName, withClause); @@ -1210,7 +1203,7 @@ private List setUpFirstIterForOptimisedBootstrap() throws Throwable { Path newReplDir = new Path(replica.repldDir + "1"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); // Do a reverse dump tuple = replica.dump(replicatedDbName, withClause); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java index 9345d34bc096..6ffaecab38d0 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenarios.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hive.common.repl.ReplScope; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.repl.ReplAck; -import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.StringAppender; import org.apache.hadoop.hive.ql.parse.repl.metric.MetricCollector; import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; @@ -203,16 +202,16 @@ static void internalBeforeClassSetup(Map additionalProperties) MetastoreConf.setBoolVar(hconf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false); hconf.set(MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS.getHiveName(), DBNOTIF_LISTENER_CLASSNAME); // turn on db notification listener on metastore - hconf.setBoolVar(HiveConf.ConfVars.REPLCMENABLED, true); + hconf.setBoolVar(HiveConf.ConfVars.REPL_CM_ENABLED, true); hconf.setBoolVar(HiveConf.ConfVars.FIRE_EVENTS_FOR_DML, true); - hconf.setVar(HiveConf.ConfVars.REPLCMDIR, TEST_PATH + "/cmroot/"); + hconf.setVar(HiveConf.ConfVars.REPL_CM_DIR, TEST_PATH + "/cmroot/"); proxySettingName = "hadoop.proxyuser." 
+ Utils.getUGI().getShortUserName() + ".hosts"; hconf.set(proxySettingName, "*"); MetastoreConf.setBoolVar(hconf, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false); - hconf.setVar(HiveConf.ConfVars.REPLDIR,TEST_PATH + "/hrepl/"); + hconf.setVar(HiveConf.ConfVars.REPL_DIR,TEST_PATH + "/hrepl/"); hconf.set(MetastoreConf.ConfVars.THRIFT_CONNECTION_RETRIES.getHiveName(), "3"); - hconf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hconf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hconf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hconf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); hconf.set(HiveConf.ConfVars.HIVE_IN_TEST_REPL.varname, "true"); hconf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true); hconf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); @@ -221,13 +220,13 @@ static void internalBeforeClassSetup(Map additionalProperties) hconf.set(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL.varname, "org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore"); hconf.set(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL.varname, "/tmp/warehouse/external"); - hconf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, true); - hconf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, true); + hconf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES, true); + hconf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, true); hconf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_RELIABLE, true); hconf.setBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET, false); hconf.setBoolVar(HiveConf.ConfVars.REPL_BATCH_INCREMENTAL_EVENTS, false); - System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, " "); additionalProperties.forEach((key, value) -> { hconf.set(key, value); @@ -247,7 +246,7 @@ static void internalBeforeClassSetup(Map additionalProperties) FileUtils.deleteDirectory(new File("metastore_db2")); HiveConf hconfMirrorServer = new HiveConf(); - hconfMirrorServer.set(HiveConf.ConfVars.METASTORECONNECTURLKEY.varname, "jdbc:derby:;databaseName=metastore_db2;create=true"); + hconfMirrorServer.set(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY.varname, "jdbc:derby:;databaseName=metastore_db2;create=true"); MetaStoreTestUtils.startMetaStoreWithRetry(hconfMirrorServer, true); hconfMirror = new HiveConf(hconf); MetastoreConf.setBoolVar(hconfMirror, MetastoreConf.ConfVars.EVENT_DB_NOTIFICATION_API_AUTH, false); @@ -4050,7 +4049,7 @@ public void testDeleteStagingDir() throws IOException { @Override public boolean accept(Path path) { - return path.getName().startsWith(HiveConf.getVar(hconf, HiveConf.ConfVars.STAGINGDIR)); + return path.getName().startsWith(HiveConf.getVar(hconf, HiveConf.ConfVars.STAGING_DIR)); } }; FileStatus[] statuses = fs.listStatus(path, filter); @@ -4230,7 +4229,7 @@ public void testRecycleFileDropTempTable() throws IOException { run("INSERT INTO " + dbName + ".normal values (1)", driver); run("DROP TABLE " + dbName + ".normal", driver); - String cmDir = hconf.getVar(HiveConf.ConfVars.REPLCMDIR); + String cmDir = hconf.getVar(HiveConf.ConfVars.REPL_CM_DIR); Path path = new Path(cmDir); FileSystem fs = path.getFileSystem(hconf); ContentSummary cs = fs.getContentSummary(path); @@ -4285,7 +4284,7 @@ public void testLoadCmPathMissing() throws Exception { run("DROP TABLE " + dbName + ".normal", driver); - String 
cmDir = hconf.getVar(HiveConf.ConfVars.REPLCMDIR); + String cmDir = hconf.getVar(HiveConf.ConfVars.REPL_CM_DIR); Path path = new Path(cmDir); FileSystem fs = path.getFileSystem(hconf); ContentSummary cs = fs.getContentSummary(path); @@ -4370,7 +4369,7 @@ public void testDDLTasksInParallel() throws Throwable{ StringAppender appender = null; LoggerConfig loggerConfig = null; try { - driverMirror.getConf().set(HiveConf.ConfVars.EXECPARALLEL.varname, "true"); + driverMirror.getConf().set(HiveConf.ConfVars.EXEC_PARALLEL.varname, "true"); logger = LogManager.getLogger("hive.ql.metadata.Hive"); oldLevel = logger.getLevel(); ctx = (LoggerContext) LogManager.getContext(false); @@ -4403,7 +4402,7 @@ public void testDDLTasksInParallel() throws Throwable{ assertEquals(count, 2); appender.reset(); } finally { - driverMirror.getConf().set(HiveConf.ConfVars.EXECPARALLEL.varname, "false"); + driverMirror.getConf().set(HiveConf.ConfVars.EXEC_PARALLEL.varname, "false"); loggerConfig.setLevel(oldLevel); ctx.updateLoggers(); appender.removeFromLogger(logger.getName()); @@ -4414,7 +4413,7 @@ public void testDDLTasksInParallel() throws Throwable{ public void testRecycleFileNonReplDatabase() throws IOException { String dbName = createDBNonRepl(testName.getMethodName(), driver); - String cmDir = hconf.getVar(HiveConf.ConfVars.REPLCMDIR); + String cmDir = hconf.getVar(HiveConf.ConfVars.REPL_CM_DIR); Path path = new Path(cmDir); FileSystem fs = path.getFileSystem(hconf); ContentSummary cs = fs.getContentSummary(path); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java index 93fd5f0cbc9f..63ba2aad1888 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTables.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore; diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java index e393c3dcd9ce..6d9fea15fd55 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcidTablesBootstrap.java @@ -28,10 +28,10 @@ import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.BehaviourInjection; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.ErrorMsg; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.junit.Assert; +import org.junit.Ignore; import org.junit.Test; import org.junit.BeforeClass; @@ -139,7 +139,7 @@ public Boolean apply(@Nullable CallerArguments args) { } finally { 
InjectableBehaviourObjectStore.resetAlterTableModifier(); } - Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); Path nonRecoverablePath = TestReplicationScenarios.getNonRecoverablePath(baseDumpDir, primaryDbName, primary.hiveConf); if(nonRecoverablePath != null){ baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -238,6 +238,7 @@ public void testAcidTablesBootstrapDuringIncrementalWithOpenTxnsTimeout() throws } @Test + @Ignore("HIVE-27936") public void testBootstrapAcidTablesDuringIncrementalWithConcurrentWrites() throws Throwable { // Dump and load bootstrap without ACID tables. WarehouseInstance.Tuple bootstrapDump = prepareDataAndDump(primaryDbName, diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java index fb2ad07acb7a..893ccd48786a 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosAcrossInstances.java @@ -46,7 +46,6 @@ import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.hadoop.hive.ql.parse.repl.PathBuilder; import org.apache.hadoop.hive.ql.parse.repl.dump.Utils; -import org.apache.hadoop.hive.ql.processors.CommandProcessorException; import org.apache.hadoop.hive.ql.util.DependencyResolver; import org.apache.hadoop.security.UserGroupInformation; import org.junit.Assert; @@ -77,7 +76,6 @@ import static org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION; import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_ACKNOWLEDGEMENT; import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.NON_RECOVERABLE_MARKER; -import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -515,7 +513,7 @@ public void testParallelExecutionOfReplicationBootStrapLoad() throws Throwable { .run("create table t3 (rank int)") .dump(primaryDbName); - replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXECPARALLEL, true); + replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXEC_PARALLEL, true); replica.load(replicatedDbName, primaryDbName) .run("use " + replicatedDbName) .run("repl status " + replicatedDbName) @@ -525,7 +523,7 @@ public void testParallelExecutionOfReplicationBootStrapLoad() throws Throwable { .run("select country from t2") .verifyResults(Arrays.asList("india", "australia", "russia", "uk", "us", "france", "japan", "china")); - replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXECPARALLEL, false); + replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXEC_PARALLEL, false); } @Test @@ -710,8 +708,8 @@ public void testBootStrapDumpOfWarehouse() throws Throwable { public void testReplLoadFromSourceUsingWithClause() throws Throwable { HiveConf replicaConf = replica.getConf(); List withConfigs = Arrays.asList( - "'hive.metastore.warehouse.dir'='" + replicaConf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE) + "'", - "'hive.metastore.uris'='" + replicaConf.getVar(HiveConf.ConfVars.METASTOREURIS) + "'", + "'hive.metastore.warehouse.dir'='" + replicaConf.getVar(HiveConf.ConfVars.METASTORE_WAREHOUSE) + "'", + "'hive.metastore.uris'='" + 
replicaConf.getVar(HiveConf.ConfVars.METASTORE_URIS) + "'", "'hive.repl.replica.functions.root.dir'='" + replicaConf.getVar(HiveConf.ConfVars.REPL_FUNCTIONS_ROOT_DIR) + "'"); //////////// Bootstrap //////////// @@ -1654,7 +1652,7 @@ public Boolean apply(@Nullable CallerArguments args) { // is loaded before t2. So that scope is set to table in first iteration for table t1. In the next iteration, it // loads only remaining partitions of t2, so that the table tracker has no tasks. - Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); Path nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); if(nonRecoverablePath != null){ baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -1994,7 +1992,7 @@ public void testRangerReplicationRetryExhausted() throws Throwable { ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode()); } //Delete non recoverable marker to fix this - Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); Path nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -2041,7 +2039,7 @@ public void testFailureUnsupportedAuthorizerReplication() throws Throwable { ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode()); } //Delete non recoverable marker to fix this - Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); Path nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -2160,7 +2158,7 @@ public void testAtlasMissingConfigs() throws Throwable { ensureFailedReplOperation(getAtlasClause(confMap), HiveConf.ConfVars.REPL_ATLAS_ENDPOINT.varname, true); ensureFailedAdminRepl(getAtlasClause(confMap), true); //Delete non recoverable marker to fix this - Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); Path nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -2170,7 +2168,7 @@ public void testAtlasMissingConfigs() throws Throwable { ensureFailedReplOperation(getAtlasClause(confMap), HiveConf.ConfVars.REPL_ATLAS_REPLICATED_TO_DB.varname, true); ensureFailedAdminRepl(getAtlasClause(confMap), true); //Delete non recoverable marker to fix this - baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -2178,7 +2176,7 @@ public void testAtlasMissingConfigs() throws Throwable { ensureFailedReplOperation(getAtlasClause(confMap), HiveConf.ConfVars.REPL_SOURCE_CLUSTER_NAME.varname, true); ensureFailedAdminRepl(getAtlasClause(confMap), true); //Delete non recoverable marker to fix this - baseDumpDir = new 
Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -2186,7 +2184,7 @@ public void testAtlasMissingConfigs() throws Throwable { ensureFailedReplOperation(getAtlasClause(confMap), HiveConf.ConfVars.REPL_TARGET_CLUSTER_NAME.varname, true); ensureFailedAdminRepl(getAtlasClause(confMap), true); //Delete non recoverable marker to fix this - baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -2200,7 +2198,7 @@ public void testAtlasMissingConfigs() throws Throwable { ensureFailedReplOperation(getAtlasClause(confMap), HiveConf.ConfVars.REPL_ATLAS_ENDPOINT.varname, false); ensureFailedAdminRepl(getAtlasClause(confMap), false); //Delete non recoverable marker to fix this - baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -2210,7 +2208,7 @@ public void testAtlasMissingConfigs() throws Throwable { ensureFailedReplOperation(getAtlasClause(confMap), HiveConf.ConfVars.REPL_SOURCE_CLUSTER_NAME.varname, false); ensureFailedAdminRepl(getAtlasClause(confMap), false); //Delete non recoverable marker to fix this - baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -2218,7 +2216,7 @@ public void testAtlasMissingConfigs() throws Throwable { ensureFailedReplOperation(getAtlasClause(confMap), HiveConf.ConfVars.REPL_TARGET_CLUSTER_NAME.varname, false); ensureFailedAdminRepl(getAtlasClause(confMap), false); //Delete non recoverable marker to fix this - baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); nonRecoverablePath = getNonRecoverablePath(baseDumpDir, primaryDbName); Assert.assertNotNull(nonRecoverablePath); baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java index 8710e2c70a0d..9645f8d03fe3 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExclusiveReplica.java @@ -96,7 +96,7 @@ public void tearDown() throws Throwable { @Test public void testTargetEventIdGenerationAfterFirstIncrementalInOptFailover() throws Throwable { List withClause 
= ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Do a bootstrap cycle(A->B) primary.dump(primaryDbName, withClause); @@ -156,7 +156,7 @@ public void testTargetEventIdGenerationAfterFirstIncrementalInOptFailover() thro Path newReplDir = new Path(replica.repldDir + "reverse1"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); tuple = replica.dump(replicatedDbName); @@ -177,7 +177,7 @@ public void testTargetEventIdGenerationAfterFirstIncrementalInOptFailover() thro public void testTargetEventIdGenerationInOptmisedFailover() throws Throwable { // Do a a cycle of bootstrap dump & load. List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Do a bootstrap cycle(A->B) primary.dump(primaryDbName, withClause); @@ -265,7 +265,7 @@ public void testTargetEventIdGenerationInOptmisedFailover() throws Throwable { Path newReplDir = new Path(replica.repldDir + "reverse01"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); tuple = replica.dump(replicatedDbName, withClause); @@ -285,7 +285,7 @@ public void testTargetEventIdGenerationInOptmisedFailover() throws Throwable { public void testTargetEventIdWithNotificationsExpiredInOptimisedFailover() throws Throwable { // Do a a cycle of bootstrap dump & load. 
List withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + primary.repldDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + primary.repldDir + "'"); // Do a bootstrap cycle(A->B) primary.dump(primaryDbName, withClause); @@ -347,7 +347,7 @@ public NotificationEventResponse apply(@Nullable NotificationEventResponse event Path newReplDir = new Path(replica.repldDir + "reverse01"); replicaFs.mkdirs(newReplDir); withClause = ReplicationTestUtils.includeExternalTableClause(true); - withClause.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + newReplDir + "'"); + withClause.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + newReplDir + "'"); try { replica.dump(replicatedDbName, withClause); @@ -1091,7 +1091,7 @@ private void verifyTableDataExists(WarehouseInstance warehouse, Path dbDataPath, private List getStagingLocationConfig(String stagingLoc, boolean addDistCpConfigs) throws IOException { List confList = new ArrayList<>(); - confList.add("'" + HiveConf.ConfVars.REPLDIR.varname + "'='" + stagingLoc + "'"); + confList.add("'" + HiveConf.ConfVars.REPL_DIR.varname + "'='" + stagingLoc + "'"); if (addDistCpConfigs) { confList.add("'" + HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXSIZE.varname + "'='1'"); confList.add("'" + HiveConf.ConfVars.HIVE_EXEC_COPYFILE_MAXNUMFILES.varname + "'='0'"); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java index 9eefd04e7f9a..8badc4c2895b 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosExternalTables.java @@ -1026,7 +1026,7 @@ public Boolean apply(@Nullable CallerArguments args) { InjectableBehaviourObjectStore.resetAlterTableModifier(); } - Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); Path nonRecoverablePath = TestReplicationScenarios.getNonRecoverablePath(baseDumpDir, primaryDbName, primary.hiveConf); if(nonRecoverablePath != null){ baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -1302,7 +1302,7 @@ public void testExternalTableBaseDirMandatory() throws Throwable { ErrorMsg.getErrorMsg(e.getMessage()).getErrorCode()); } //delete non recoverable marker - Path dumpPath = new Path(primary.hiveConf.get(HiveConf.ConfVars.REPLDIR.varname), + Path dumpPath = new Path(primary.hiveConf.get(HiveConf.ConfVars.REPL_DIR.varname), Base64.getEncoder().encodeToString(primaryDbName.toLowerCase() .getBytes(StandardCharsets.UTF_8.name()))); FileSystem fs = dumpPath.getFileSystem(conf); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosUsingSnapshots.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosUsingSnapshots.java index cdc87733581f..f941d183b82d 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosUsingSnapshots.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestReplicationScenariosUsingSnapshots.java @@ -488,7 +488,7 @@ public void testFailureScenarios() throws Throwable { // Ignore } // Check if there is a 
non-recoverable error or not. - Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); Path nonRecoverablePath = TestReplicationScenarios.getNonRecoverablePath(baseDumpDir, primaryDbName, primary.hiveConf); assertTrue(fs.exists(nonRecoverablePath)); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java index ff7733d2b9b6..2c0a70398d42 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java @@ -137,7 +137,7 @@ public void testAcidTablesReplLoadBootstrapIncr() throws Throwable { primary.run("create scheduled query s1_t1 every 5 seconds as repl dump " + primaryDbName); replica.run("create scheduled query s2_t1 every 5 seconds as repl load " + primaryDbName + " INTO " + replicatedDbName); - Path dumpRoot = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR), + Path dumpRoot = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR), Base64.getEncoder().encodeToString(primaryDbName.toLowerCase().getBytes(StandardCharsets.UTF_8.name()))); FileSystem fs = FileSystem.get(dumpRoot.toUri(), primary.hiveConf); @@ -208,7 +208,7 @@ public void testExternalTablesReplLoadBootstrapIncr() throws Throwable { primary.run("create scheduled query s1_t2 every 5 seconds as repl dump " + primaryDbName + withClause); replica.run("create scheduled query s2_t2 every 5 seconds as repl load " + primaryDbName + " INTO " + replicatedDbName + withClause); - Path dumpRoot = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR), + Path dumpRoot = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR), Base64.getEncoder().encodeToString(primaryDbName.toLowerCase().getBytes(StandardCharsets.UTF_8.name()))); FileSystem fs = FileSystem.get(dumpRoot.toUri(), primary.hiveConf); next = Integer.parseInt(ReplDumpWork.getTestInjectDumpDir()) + 1; diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java index e23c542d670b..58fb5958b734 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestStatsReplicationScenarios.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.CallerArguments; import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreClientWithLocalCache; import org.apache.hadoop.hive.shims.Utils; -import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.junit.After; @@ -46,7 +45,6 @@ import org.junit.rules.TestName; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.junit.Ignore; import org.junit.Assert; import java.io.IOException; @@ -114,7 +112,7 @@ static void internalBeforeClassSetup(Map primaryOverrides, // Run with autogather false on primary if requested Map sourceOverrides = new HashMap<>(); hasAutogather = autogather; - additionalOverrides.put(HiveConf.ConfVars.HIVESTATSAUTOGATHER.varname, + additionalOverrides.put(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER.varname, autogather ? 
"true" : "false"); sourceOverrides.putAll(additionalOverrides); sourceOverrides.putAll(primaryOverrides); @@ -337,7 +335,7 @@ private String dumpLoadVerify(List tableNames, String lastReplicationId, // Load, if necessary changing configuration. if (parallelLoad) { - replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXECPARALLEL, true); + replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXEC_PARALLEL, true); } // Fail load if for testing failure and retry scenario. Fail the load while setting @@ -350,7 +348,7 @@ private String dumpLoadVerify(List tableNames, String lastReplicationId, } } - Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPLDIR)); + Path baseDumpDir = new Path(primary.hiveConf.getVar(HiveConf.ConfVars.REPL_DIR)); Path nonRecoverablePath = TestReplicationScenarios.getNonRecoverablePath(baseDumpDir, primaryDbName, primary.hiveConf); if(nonRecoverablePath != null){ baseDumpDir.getFileSystem(primary.hiveConf).delete(nonRecoverablePath, true); @@ -366,7 +364,7 @@ private String dumpLoadVerify(List tableNames, String lastReplicationId, } if (parallelLoad) { - replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXECPARALLEL, false); + replica.hiveConf.setBoolVar(HiveConf.ConfVars.EXEC_PARALLEL, false); } // Test statistics diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestTimedOutTxnNotificationLogging.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestTimedOutTxnNotificationLogging.java new file mode 100644 index 000000000000..130e4908b3c2 --- /dev/null +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestTimedOutTxnNotificationLogging.java @@ -0,0 +1,202 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.parse; + +import org.apache.hadoop.hive.cli.CliSessionState; +import org.apache.hadoop.hive.common.repl.ReplScope; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.MetastoreTaskThread; +import org.apache.hadoop.hive.metastore.ObjectStore; +import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.messaging.AbortTxnMessage; +import org.apache.hadoop.hive.metastore.messaging.MessageBuilder; +import org.apache.hadoop.hive.metastore.messaging.MessageDeserializer; +import org.apache.hadoop.hive.metastore.messaging.OpenTxnMessage; +import org.apache.hadoop.hive.metastore.messaging.event.filters.AndFilter; +import org.apache.hadoop.hive.metastore.messaging.event.filters.CatalogFilter; +import org.apache.hadoop.hive.metastore.messaging.event.filters.EventBoundaryFilter; +import org.apache.hadoop.hive.metastore.messaging.event.filters.ReplEventFilter; +import org.apache.hadoop.hive.metastore.txn.service.AcidHouseKeeperService; +import org.apache.hadoop.hive.metastore.txn.service.AcidTxnCleanerService; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil; +import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; +import org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hive.hcatalog.listener.DbNotificationListener; +import org.apache.thrift.TException; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; + +import java.util.*; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + + +@RunWith(Parameterized.class) +public class TestTimedOutTxnNotificationLogging { + + private HiveConf hiveConf; + + private ObjectStore objectStore; + + private MetastoreTaskThread acidTxnCleanerService; + + private MetastoreTaskThread acidHouseKeeperService; + + private static IMetaStoreClient hive; + + @Parameterized.Parameter + public int numberOfTxns; + + @Parameterized.Parameter(1) + public TxnType txnType; + + @Parameterized.Parameter(2) + public int expectedNotifications; + + @Parameterized.Parameters(name = "{index}: numberOfTxns={0},txnType={1},expectedNotifications={2}") + public static Collection data() { + return Arrays.asList( + new Object[][] { { 3, TxnType.REPL_CREATED, 3 }, { 3, TxnType.DEFAULT, 3 }, { 3, TxnType.READ_ONLY, 0 } }); + } + + @Before + public void setUp() throws Exception { + setConf(); + TestTxnDbUtil.prepDb(hiveConf); + SessionState.start(new CliSessionState(hiveConf)); + hive = new HiveMetaStoreClient(hiveConf); + objectStore = new ObjectStore(); + objectStore.setConf(hiveConf); + acidTxnCleanerService = new AcidTxnCleanerService(); + acidTxnCleanerService.setConf(hiveConf); + acidHouseKeeperService = new AcidHouseKeeperService(); + acidHouseKeeperService.setConf(hiveConf); + } + + private void setConf() { + hiveConf = new HiveConf(); + MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.HIVE_IN_TEST, true); + MetastoreConf.setVar(hiveConf, MetastoreConf.ConfVars.WAREHOUSE, "/tmp"); + MetastoreConf.setTimeVar(hiveConf, MetastoreConf.ConfVars.TXN_TIMEOUT, 
1, TimeUnit.SECONDS); + HiveConf.setVar(hiveConf, HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, + SQLStdHiveAuthorizerFactory.class.getName()); + MetastoreConf.setVar(hiveConf, MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS, + DbNotificationListener.class.getName()); + MetastoreConf.setTimeVar(hiveConf, MetastoreConf.ConfVars.EVENT_DB_LISTENER_CLEAN_INTERVAL, 10, + TimeUnit.MILLISECONDS); + MetastoreConf.setTimeVar(hiveConf, MetastoreConf.ConfVars.EVENT_DB_LISTENER_CLEAN_STARTUP_WAIT_INTERVAL, 0, + TimeUnit.SECONDS); + } + + @After + public void tearDown() throws Exception { + TestTxnDbUtil.cleanDb(hiveConf); + if (hive != null) { + hive.close(); + } + SessionState.get().close(); + hiveConf = null; + } + + @Test + public void testTxnNotificationLogging() throws Exception { + try { + List<Long> txnIds = openTxns(numberOfTxns, txnType); + Assert.assertEquals(txnIds.size(), getNumberOfTxnsWithTxnState(txnIds, TxnState.OPEN)); + Assert.assertEquals(expectedNotifications, getNumberOfNotificationsWithEventType(txnIds, MessageBuilder.OPEN_TXN_EVENT)); + Thread.sleep(1000); + acidHouseKeeperService.run(); //this will abort timed-out txns + if (txnType != TxnType.REPL_CREATED) { + Assert.assertEquals(txnIds.size(), getNumberOfTxnsWithTxnState(txnIds, TxnState.ABORTED)); + Assert.assertEquals(expectedNotifications, getNumberOfNotificationsWithEventType(txnIds, MessageBuilder.ABORT_TXN_EVENT)); + } + } finally { + runCleanerServices(); + } + } + + private int getNumberOfNotificationsWithEventType(List<Long> txnIds, String eventType) throws TException { + int numNotifications = 0; + IMetaStoreClient.NotificationFilter evFilter = new AndFilter(new ReplEventFilter(new ReplScope()), + new CatalogFilter(MetaStoreUtils.getDefaultCatalog(hiveConf)), new EventBoundaryFilter(0, 100)); + NotificationEventResponse rsp = hive.getNextNotification(new NotificationEventRequest(), true, evFilter); + if (rsp.getEvents() == null) { + return numNotifications; + } + Iterator<NotificationEvent> eventIterator = rsp.getEvents().iterator(); + MessageDeserializer deserializer; + while (eventIterator.hasNext()) { + NotificationEvent ev = eventIterator.next(); + if (eventType.equals(ev.getEventType())) { + deserializer = ReplUtils.getEventDeserializer(ev); + switch (ev.getEventType()) { + case MessageBuilder.OPEN_TXN_EVENT: + OpenTxnMessage openTxnMessage = deserializer.getOpenTxnMessage(ev.getMessage()); + if (txnIds.contains(openTxnMessage.getTxnIds().get(0))) { + numNotifications++; + } + break; + case MessageBuilder.ABORT_TXN_EVENT: + AbortTxnMessage abortTxnMessage = deserializer.getAbortTxnMessage(ev.getMessage()); + if (txnIds.contains(abortTxnMessage.getTxnId())) { + numNotifications++; + } + } + } + } + return numNotifications; + } + + private List<Long> openTxns(int txnCounter, TxnType txnType) throws TException { + List<Long> txnIds = new LinkedList<>(); + for (; txnCounter > 0; txnCounter--) { + if (txnType == TxnType.REPL_CREATED) { + Long srcTxn = (long) (11 + txnCounter); + List<Long> srcTxns = Collections.singletonList(srcTxn); + txnIds.addAll(hive.replOpenTxn("testPolicy", srcTxns, "hive", txnType)); + } else { + txnIds.add(hive.openTxn("hive", txnType)); + } + } + return txnIds; + } + + private int getNumberOfTxnsWithTxnState(List<Long> txnIds, TxnState txnState) throws TException { + AtomicInteger numTxns = new AtomicInteger(); + hive.showTxns().getOpen_txns().forEach(txnInfo -> { + if (txnInfo.getState() == txnState && txnIds.contains(txnInfo.getId())) { + numTxns.incrementAndGet(); + } + }); + return numTxns.get(); + } + + private void
runCleanerServices() { + objectStore.cleanNotificationEvents(0); + acidTxnCleanerService.run(); //this will remove empty aborted txns + } +} \ No newline at end of file diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java index e7701b54ca6e..ac656b45cff7 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/WarehouseInstance.java @@ -129,35 +129,35 @@ private void initialize(String cmRoot, String externalTableWarehouseRoot, String Map overridesForHiveConf) throws Exception { hiveConf = new HiveConf(miniDFSCluster.getConfiguration(0), TestReplicationScenarios.class); - String metaStoreUri = System.getProperty("test." + HiveConf.ConfVars.METASTOREURIS.varname); + String metaStoreUri = System.getProperty("test." + HiveConf.ConfVars.METASTORE_URIS.varname); if (metaStoreUri != null) { - hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, metaStoreUri); + hiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, metaStoreUri); return; } // hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, hiveInTest); // turn on db notification listener on meta store - hiveConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, warehouseRoot); + hiveConf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, warehouseRoot); hiveConf.setVar(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL, externalTableWarehouseRoot); hiveConf.setVar(HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS, LISTENER_CLASS); - hiveConf.setBoolVar(HiveConf.ConfVars.REPLCMENABLED, true); + hiveConf.setBoolVar(HiveConf.ConfVars.REPL_CM_ENABLED, true); hiveConf.setBoolVar(HiveConf.ConfVars.FIRE_EVENTS_FOR_DML, true); - hiveConf.setVar(HiveConf.ConfVars.REPLCMDIR, cmRoot); + hiveConf.setVar(HiveConf.ConfVars.REPL_CM_DIR, cmRoot); hiveConf.setVar(HiveConf.ConfVars.REPL_FUNCTIONS_ROOT_DIR, functionsRoot); hiveConf.setBoolVar(HiveConf.ConfVars.REPL_DUMP_METADATA_ONLY_FOR_EXTERNAL_TABLE, false); - hiveConf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, + hiveConf.setVar(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY, "jdbc:derby:memory:${test.tmp.dir}/APP;create=true"); - hiveConf.setVar(HiveConf.ConfVars.REPLDIR, this.repldDir); - hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + hiveConf.setVar(HiveConf.ConfVars.REPL_DIR, this.repldDir); + hiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); if (!hiveConf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER).equals("org.apache.hadoop.hive.ql.lockmgr.DbTxnManager")) { hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); } hiveConf.set(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL.varname, "org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore"); - System.setProperty(HiveConf.ConfVars.PREEXECHOOKS.varname, " "); - System.setProperty(HiveConf.ConfVars.POSTEXECHOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, " "); + System.setProperty(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, " "); for (Map.Entry entry : overridesForHiveConf.entrySet()) { hiveConf.set(entry.getKey(), entry.getValue()); @@ -181,14 +181,14 @@ private void 
initialize(String cmRoot, String externalTableWarehouseRoot, String */ - /*hiveConf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, "jdbc:mysql://localhost:3306/APP"); + /*hiveConf.setVar(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY, "jdbc:mysql://localhost:3306/APP"); hiveConf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver"); - hiveConf.setVar(HiveConf.ConfVars.METASTOREPWD, "hivepassword"); + hiveConf.setVar(HiveConf.ConfVars.METASTORE_PWD, "hivepassword"); hiveConf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "hiveuser");*/ - /*hiveConf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,"jdbc:postgresql://localhost/app"); + /*hiveConf.setVar(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY,"jdbc:postgresql://localhost/app"); hiveConf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "org.postgresql.Driver"); - hiveConf.setVar(HiveConf.ConfVars.METASTOREPWD, "password"); + hiveConf.setVar(HiveConf.ConfVars.METASTORE_PWD, "password"); hiveConf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "postgres");*/ driver = DriverFactory.newDriver(hiveConf); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java index b3383d923bec..b2e2678a8165 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/StorageBasedMetastoreTestBase.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; import org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener; @@ -77,11 +76,11 @@ public void setUp() throws Exception { // Turn off client-side authorization clientHiveConf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED,false); - clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); ugi = Utils.getUGI(); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java index b78c348c5203..0235aef1d54b 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestAuthorizationPreEventListener.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import org.apache.hadoop.hive.ql.DriverFactory; import 
org.apache.hadoop.hive.ql.IDriver; import org.apache.hadoop.hive.ql.security.DummyHiveMetastoreAuthorizationProvider.AuthCallContext; @@ -68,12 +67,12 @@ public void setUp() throws Exception { clientHiveConf = new HiveConf(this.getClass()); - clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + clientHiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + port); + clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); SessionState.start(new CliSessionState(clientHiveConf)); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java index 948ab4d10370..120d967a4754 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestClientSideAuthorizationProvider.java @@ -80,13 +80,13 @@ public void setUp() throws Exception { clientHiveConf.set(HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER.varname, InjectableDummyAuthenticator.class.getName()); clientHiveConf.set(HiveConf.ConfVars.HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS.varname, ""); - clientHiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); - clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + clientHiveConf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); + clientHiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + port); + clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); ugi = Utils.getUGI(); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java index 3fa1d0d5b50d..72a953fea425 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreAuthorizationProvider.java @@ -113,12 +113,12 @@ public void setUp() throws Exception { // Turn off client-side authorization clientHiveConf.setBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED,false); - clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + clientHiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + port); + 
clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); ugi = Utils.getUGI(); @@ -175,7 +175,7 @@ public void testSimplePrivileges() throws Exception { String tblName = getTestTableName(); String userName = setupUser(); String loc = clientHiveConf.get(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL.varname) + "/" + dbName; - String mLoc = clientHiveConf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname) + "/" + dbName; + String mLoc = clientHiveConf.get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname) + "/" + dbName; allowCreateDatabase(userName); driver.run("create database " + dbName + " location '" + loc + "' managedlocation '" + mLoc + "'"); Database db = msc.getDatabase(dbName); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreClientSideAuthorizationProvider.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreClientSideAuthorizationProvider.java index dbd71cb0ceb4..b166df33b8bd 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreClientSideAuthorizationProvider.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMetastoreClientSideAuthorizationProvider.java @@ -63,13 +63,13 @@ public void setUp() throws Exception { clientHiveConf.set(HiveConf.ConfVars.HIVE_AUTHENTICATOR_MANAGER.varname, InjectableDummyAuthenticator.class.getName()); clientHiveConf.set(HiveConf.ConfVars.HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS.varname, ""); - clientHiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); - clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); - clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3); + clientHiveConf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); + clientHiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + port); + clientHiveConf.setIntVar(HiveConf.ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, 3); clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - clientHiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - clientHiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + clientHiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); ugi = Utils.getUGI(); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java index d1e80698efa9..1a5a840c8680 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/TestMultiAuthorizationPreEventListener.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge; import 
org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; import org.apache.hadoop.hive.ql.security.DummyHiveMetastoreAuthorizationProvider.AuthCallContext; @@ -63,7 +62,7 @@ public static void setUp() throws Exception { clientHiveConf = new HiveConf(); - clientHiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port); + clientHiveConf.setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + port); clientHiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); SessionState.start(new CliSessionState(clientHiveConf)); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java index b7148fa98e1e..0d939af9de46 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java @@ -107,7 +107,7 @@ public static void beforeTest() throws Exception { conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, true); conf.setVar(ConfVars.HIVE_TXN_MANAGER, DbTxnManager.class.getName()); conf.setBoolVar(ConfVars.HIVE_QUERY_RESULTS_CACHE_ENABLED, true); - conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); + conf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); conf.setBoolVar(ConfVars.HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE, true); conf.setBoolVar(ConfVars.HIVE_ZOOKEEPER_KILLQUERY_ENABLE, false); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java index 8645812ab95f..8e09d9697f4a 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/session/TestClearDanglingScratchDir.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.util.Shell; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -56,12 +55,12 @@ static public void oneTimeSetup() throws Exception { conf.set(HiveConf.ConfVars.HIVE_SCRATCH_DIR_LOCK.toString(), "true"); conf.set(HiveConf.ConfVars.METASTORE_AUTO_CREATE_ALL.toString(), "true"); LoggerFactory.getLogger("SessionState"); - conf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, + conf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, new Path(System.getProperty("test.tmp.dir"), "warehouse").toString()); conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, m_dfs.getFileSystem().getUri().toString()); - scratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR)); + scratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR)); m_dfs.getFileSystem().mkdirs(scratchDir); m_dfs.getFileSystem().setPermission(scratchDir, new FsPermission("777")); } @@ -163,7 +162,7 @@ public void testLocalDanglingFilesCleaning() throws Exception { // Simulating hdfs dangling dir and its inuse.lck file // Note: Give scratch dirs all the write permissions FsPermission allPermissions = new FsPermission((short)00777); - customScratchDir = new Path(HiveConf.getVar(conf, 
HiveConf.ConfVars.SCRATCHDIR)); + customScratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR)); Utilities.createDirsWithPermission(conf, customScratchDir, allPermissions, true); Path hdfsRootDir = new Path(customScratchDir + l + userName + l + hdfs); Path hdfsSessionDir = new Path(hdfsRootDir + l + userName + l + appId); @@ -171,7 +170,7 @@ public void testLocalDanglingFilesCleaning() throws Exception { fs.create(hdfsSessionLock); // Simulating local dangling files - customLocalTmpDir = new Path (HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR)); + customLocalTmpDir = new Path (HiveConf.getVar(conf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR)); Path localSessionDir = new Path(customLocalTmpDir + l + appId); Path localPipeOutFileRemove = new Path(customLocalTmpDir + l + appId + "-started-with-session-name.pipeout"); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorOnTezTest.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorOnTezTest.java index 5a3324831e0d..03c6f70e0fe1 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorOnTezTest.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorOnTezTest.java @@ -45,7 +45,6 @@ import org.junit.ClassRule; import org.junit.rules.TemporaryFolder; -import java.io.EOFException; import java.io.File; import java.io.IOException; import java.util.Collections; @@ -102,11 +101,11 @@ protected void setupWithConf(HiveConf hiveConf) throws Exception { if (!(new File(TEST_WAREHOUSE_DIR).mkdirs())) { throw new RuntimeException("Could not create " + TEST_WAREHOUSE_DIR); } - hiveConf.setVar(HiveConf.ConfVars.PREEXECHOOKS, ""); - hiveConf.setVar(HiveConf.ConfVars.POSTEXECHOOKS, ""); - hiveConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, TEST_WAREHOUSE_DIR); - hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + hiveConf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, ""); + hiveConf.setVar(HiveConf.ConfVars.POST_EXEC_HOOKS, ""); + hiveConf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, TEST_WAREHOUSE_DIR); + hiveConf.setVar(HiveConf.ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); MetastoreConf.setTimeVar(hiveConf, MetastoreConf.ConfVars.TXN_OPENTXN_TIMEOUT, 2, TimeUnit.SECONDS); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_INITIATOR_ON, true); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_CLEANER_ON, true); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithReplication.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithReplication.java index 4a7bb34bad1a..67af2443b259 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithReplication.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCleanerWithReplication.java @@ -52,9 +52,9 @@ public class TestCleanerWithReplication extends CompactorTest { public void setup() throws Exception { HiveConf conf = new HiveConf(); conf.set("fs.defaultFS", miniDFSCluster.getFileSystem().getUri().toString()); - conf.setBoolVar(HiveConf.ConfVars.REPLCMENABLED, true); + conf.setBoolVar(HiveConf.ConfVars.REPL_CM_ENABLED, true); setup(conf); - cmRootDirectory = new 
Path(conf.get(HiveConf.ConfVars.REPLCMDIR.varname)); + cmRootDirectory = new Path(conf.get(HiveConf.ConfVars.REPL_CM_DIR.varname)); if (!fs.exists(cmRootDirectory)) { fs.mkdirs(cmRootDirectory); } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java index fd6e111df0ac..1083bef80e9b 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactor.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil; @@ -52,9 +52,6 @@ import org.apache.hadoop.hive.ql.io.orc.Reader; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse; -import org.apache.hadoop.hive.ql.txn.compactor.Cleaner; -import org.apache.hadoop.hive.ql.txn.compactor.FSRemover; -import org.apache.hadoop.hive.ql.txn.compactor.MetadataCache; import org.apache.hadoop.hive.ql.txn.compactor.handler.TaskHandler; import org.apache.hadoop.hive.ql.txn.compactor.handler.TaskHandlerFactory; import org.apache.hive.streaming.HiveStreamingConnection; diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactorBase.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactorBase.java index a57a817e1612..b7c689e9effc 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactorBase.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCompactorBase.java @@ -81,12 +81,12 @@ public void setup() throws Exception { } HiveConf hiveConf = new HiveConf(this.getClass()); - hiveConf.setVar(HiveConf.ConfVars.PREEXECHOOKS, ""); - hiveConf.setVar(HiveConf.ConfVars.POSTEXECHOOKS, ""); - hiveConf.setVar(HiveConf.ConfVars.METASTOREWAREHOUSE, TEST_WAREHOUSE_DIR); - hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, false); + hiveConf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, ""); + hiveConf.setVar(HiveConf.ConfVars.POST_EXEC_HOOKS, ""); + hiveConf.setVar(HiveConf.ConfVars.METASTORE_WAREHOUSE, TEST_WAREHOUSE_DIR); + hiveConf.setVar(HiveConf.ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES, false); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_INITIATOR_ON, true); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_CLEANER_ON, true); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_CLEAN_ABORTS_USING_CLEANER, true); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java index 295bbdd09f79..f64fac9038d9 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestCrudCompactorOnTez.java @@ -50,7 +50,7 @@ import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.ql.Driver; @@ -105,7 +105,7 @@ public void testRebalanceCompactionWithParallelDeleteAsSecondPessimisticLock() t private void testRebalanceCompactionWithParallelDeleteAsSecond(boolean optimisticLock) throws Exception { conf.setBoolVar(HiveConf.ConfVars.COMPACTOR_CRUD_QUERY_BASED, true); conf.setBoolVar(HiveConf.ConfVars.HIVE_COMPACTOR_GATHER_STATS, false); - conf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); + conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); conf.setBoolVar(HiveConf.ConfVars.TXN_WRITE_X_LOCK, optimisticLock); //set grouping size to have 3 buckets, and re-create driver with the new config @@ -204,7 +204,7 @@ private void testRebalanceCompactionWithParallelDeleteAsSecond(boolean optimisti public void testRebalanceCompactionOfNotPartitionedImplicitlyBucketedTableWithOrder() throws Exception { conf.setBoolVar(HiveConf.ConfVars.COMPACTOR_CRUD_QUERY_BASED, true); conf.setBoolVar(HiveConf.ConfVars.HIVE_COMPACTOR_GATHER_STATS, false); - conf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); + conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); //set grouping size to have 3 buckets, and re-create driver with the new config conf.set("tez.grouping.min-size", "400"); @@ -260,7 +260,7 @@ public void testRebalanceCompactionOfNotPartitionedImplicitlyBucketedTableWithOr public void testRebalanceCompactionOfNotPartitionedImplicitlyBucketedTable() throws Exception { conf.setBoolVar(HiveConf.ConfVars.COMPACTOR_CRUD_QUERY_BASED, true); conf.setBoolVar(HiveConf.ConfVars.HIVE_COMPACTOR_GATHER_STATS, false); - conf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); + conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); //set grouping size to have 3 buckets, and re-create driver with the new config conf.set("tez.grouping.min-size", "400"); @@ -314,7 +314,7 @@ public void testRebalanceCompactionOfNotPartitionedImplicitlyBucketedTable() thr public void testRebalanceCompactionOfPartitionedImplicitlyBucketedTable() throws Exception { conf.setBoolVar(HiveConf.ConfVars.COMPACTOR_CRUD_QUERY_BASED, true); conf.setBoolVar(HiveConf.ConfVars.HIVE_COMPACTOR_GATHER_STATS, false); - conf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); + conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); //set grouping size to have 3 buckets, and re-create driver with the new config conf.set("tez.grouping.min-size", "1"); @@ -423,7 +423,7 @@ public void testRebalanceCompactionOfPartitionedImplicitlyBucketedTable() throws public void testRebalanceCompactionOfNotPartitionedExplicitlyBucketedTable() throws Exception { conf.setBoolVar(HiveConf.ConfVars.COMPACTOR_CRUD_QUERY_BASED, true); 
conf.setBoolVar(HiveConf.ConfVars.HIVE_COMPACTOR_GATHER_STATS, false); - conf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); + conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); final String tableName = "rebalance_test"; executeStatementOnDriver("drop table if exists " + tableName, driver); @@ -445,7 +445,7 @@ public void testRebalanceCompactionOfNotPartitionedExplicitlyBucketedTable() thr public void testRebalanceCompactionNotPartitionedExplicitBucketNumbers() throws Exception { conf.setBoolVar(HiveConf.ConfVars.COMPACTOR_CRUD_QUERY_BASED, true); conf.setBoolVar(HiveConf.ConfVars.HIVE_COMPACTOR_GATHER_STATS, false); - conf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); + conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); //set grouping size to have 3 buckets, and re-create driver with the new config conf.set("tez.grouping.min-size", "400"); @@ -885,10 +885,10 @@ public void testMajorCompactionNotPartitionedWithoutBuckets() throws Exception { Assert.assertEquals("pre-compaction bucket 0", expectedRsBucket0, testDataProvider.getBucketData(tblName, "536870912")); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, HiveProtoLoggingHook.class.getName()); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, HiveProtoLoggingHook.class.getName()); // Run major compaction and cleaner CompactorTestUtil.runCompaction(conf, dbName, tblName, CompactionType.MAJOR, true); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, StringUtils.EMPTY); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, StringUtils.EMPTY); CompactorTestUtil.runCleaner(conf); verifySuccessfulCompaction(1); @@ -2215,8 +2215,8 @@ public void testCompactionWithSchemaEvolutionAndBuckets() throws Exception { @Test public void testCompactionWithSchemaEvolutionNoBucketsMultipleReducers() throws Exception { HiveConf hiveConf = new HiveConf(conf); - hiveConf.setIntVar(HiveConf.ConfVars.MAXREDUCERS, 2); - hiveConf.setIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS, 2); + hiveConf.setIntVar(HiveConf.ConfVars.MAX_REDUCERS, 2); + hiveConf.setIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS, 2); driver = DriverFactory.newDriver(hiveConf); String dbName = "default"; String tblName = "testCompactionWithSchemaEvolutionNoBucketsMultipleReducers"; @@ -2915,10 +2915,10 @@ public void testCompactionWithCreateTableProps() throws Exception { CompactionRequest rqst = new CompactionRequest(dbName, tblName, CompactionType.MAJOR); CompactionResponse resp = txnHandler.compact(rqst); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, HiveProtoLoggingHook.class.getName()); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, HiveProtoLoggingHook.class.getName()); // Run major compaction and cleaner runWorker(conf); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, StringUtils.EMPTY); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, StringUtils.EMPTY); CompactorTestUtil.runCleaner(conf); @@ -2981,10 +2981,10 @@ public void testCompactionWithAlterTableProps() throws Exception { // Get all data before compaction is run List expectedData = testDP.getAllData(tblName); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, HiveProtoLoggingHook.class.getName()); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, HiveProtoLoggingHook.class.getName()); // Run major compaction and cleaner runWorker(conf); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, StringUtils.EMPTY); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, StringUtils.EMPTY); CompactorTestUtil.runCleaner(conf); @@ -3098,7 +3098,7 @@ public void testStatsAfterCompactionPartTblForQueryBasedCompaction() throws Exce 
public void testStatsAfterCompactionPartTbl(boolean isQueryBased, boolean isAutoGatherStats, CompactionType compactionType) throws Exception { conf.setBoolVar(HiveConf.ConfVars.COMPACTOR_CRUD_QUERY_BASED, isQueryBased); - conf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, isAutoGatherStats); + conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, isAutoGatherStats); String dbName = "default"; String tblName = "minor_compaction_test"; IMetaStoreClient msClient = new HiveMetaStoreClient(conf); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMRCompactorOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMRCompactorOnTez.java index 14197a5326d2..e79ce9560d98 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMRCompactorOnTez.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMRCompactorOnTez.java @@ -41,7 +41,7 @@ public void testCompactorGatherStats() throws Exception{ conf.setBoolVar(HiveConf.ConfVars.HIVE_WRITE_ACID_VERSION_FILE, true); conf.setVar(HiveConf.ConfVars.COMPACTOR_JOB_QUEUE, CUSTOM_COMPACTION_QUEUE); conf.setBoolVar(HiveConf.ConfVars.HIVE_MR_COMPACTOR_GATHER_STATS, true); - conf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); + conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); conf.setVar(HiveConf.ConfVars.HIVE_PROTO_EVENTS_BASE_PATH, tmpFolder); String dbName = "default"; @@ -59,10 +59,10 @@ public void testCompactorGatherStats() throws Exception{ executeStatementOnDriver("analyze table " + dbName + "." + tableName + " compute statistics for columns", driver); executeStatementOnDriver("insert into " + dbName + "." + tableName + " values(2)", driver); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, HiveProtoLoggingHook.class.getName()); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, HiveProtoLoggingHook.class.getName()); // Run major compaction and cleaner CompactorTestUtil.runCompaction(conf, dbName, tableName, CompactionType.MAJOR, false); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, StringUtils.EMPTY); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, StringUtils.EMPTY); CompactorTestUtil.runCleaner(conf); verifySuccessfulCompaction(1); @@ -76,10 +76,10 @@ public void testCompactorGatherStats() throws Exception{ executeStatementOnDriver("alter table " + dbName + "." + tableName + " set tblproperties('compactor.mapred.job.queue.name'='" + CUSTOM_COMPACTION_QUEUE + "')", driver); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, HiveProtoLoggingHook.class.getName()); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, HiveProtoLoggingHook.class.getName()); // Run major compaction and cleaner CompactorTestUtil.runCompaction(conf, dbName, tableName, CompactionType.MAJOR, false); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, StringUtils.EMPTY); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, StringUtils.EMPTY); CompactorTestUtil.runCleaner(conf); verifySuccessfulCompaction(2); @@ -90,10 +90,10 @@ public void testCompactorGatherStats() throws Exception{ assertEquals("Value should contain new data", 1, colStats.get(0).getStatsData().getLongStats().getLowValue()); executeStatementOnDriver("insert into " + dbName + "." 
+ tableName + " values(4)", driver); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, HiveProtoLoggingHook.class.getName()); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, HiveProtoLoggingHook.class.getName()); CompactorTestUtil.runCompaction(conf, dbName, tableName, CompactionType.MAJOR, false, Collections.singletonMap("compactor.mapred.job.queue.name", CUSTOM_COMPACTION_QUEUE)); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, StringUtils.EMPTY); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, StringUtils.EMPTY); CompactorTestUtil.runCleaner(conf); verifySuccessfulCompaction(3); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMaterializedViewRebuild.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMaterializedViewRebuild.java index a0bf2608bfbe..d38e6695cb49 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMaterializedViewRebuild.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMaterializedViewRebuild.java @@ -17,20 +17,27 @@ */ package org.apache.hadoop.hive.ql.txn.compactor; -import java.util.Arrays; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; - +import org.apache.hadoop.hive.common.ValidReadTxnList; +import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.metastore.api.CompactionType; +import org.apache.hadoop.hive.metastore.api.OpenTxnRequest; +import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.junit.Assert; import org.junit.Test; +import org.mockito.Mockito; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; -import static org.apache.hadoop.hive.ql.txn.compactor.TestCompactor.executeStatementOnDriver; import static org.apache.hadoop.hive.ql.txn.compactor.TestCompactor.execSelectAndDumpData; +import static org.apache.hadoop.hive.ql.txn.compactor.TestCompactor.executeStatementOnDriver; import static org.apache.hadoop.hive.ql.txn.compactor.TestCompactor.executeStatementOnDriverSilently; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.when; public class TestMaterializedViewRebuild extends CompactorOnTezTest { @@ -182,4 +189,18 @@ private void assertResult(List expected, List actual) { Assert.assertEquals(expected, actual); } + @Test + public void testMaterializationLockCleaned() throws Exception { + TxnStore txnHandler = TxnUtils.getTxnStore(conf); + OpenTxnsResponse response = txnHandler.openTxns(new OpenTxnRequest(1, "user", "host")); + txnHandler.lockMaterializationRebuild("default", TABLE1, response.getTxn_ids().get(0)); + + //Mimic the lock can be cleaned up + ValidTxnList validTxnList = Mockito.mock(ValidReadTxnList.class); + when(validTxnList.isTxnValid(anyLong())).thenReturn(true); + + long removedCnt = txnHandler.cleanupMaterializationRebuildLocks(validTxnList, 10); + Assert.assertEquals(1, removedCnt); + } + } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMmCompactorOnTez.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMmCompactorOnTez.java index 0a57c4588337..55d36ed57bcc 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMmCompactorOnTez.java +++ 
b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/txn/compactor/TestMmCompactorOnTez.java @@ -85,11 +85,11 @@ public TestMmCompactorOnTez() { CompactorTestUtil.getBaseOrDeltaNames(fs, AcidUtils.deltaFileFilter, table, null)); if (isTez(conf)) { - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, HiveProtoLoggingHook.class.getName()); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, HiveProtoLoggingHook.class.getName()); } // Run a compaction CompactorTestUtil.runCompaction(conf, dbName, tableName, CompactionType.MINOR, true); - conf.setVar(HiveConf.ConfVars.PREEXECHOOKS, StringUtils.EMPTY); + conf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, StringUtils.EMPTY); CompactorTestUtil.runCleaner(conf); verifySuccessulTxn(1); @@ -480,8 +480,8 @@ private void testMmMinorCompactionPartitionedWithBuckets(String fileFormat) thro @Test public void testMmMinorCompactionWithSchemaEvolutionNoBucketsMultipleReducers() throws Exception { HiveConf hiveConf = new HiveConf(conf); - hiveConf.setIntVar(HiveConf.ConfVars.MAXREDUCERS, 2); - hiveConf.setIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS, 2); + hiveConf.setIntVar(HiveConf.ConfVars.MAX_REDUCERS, 2); + hiveConf.setIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS, 2); driver = DriverFactory.newDriver(hiveConf); String dbName = "default"; String tblName = "testMmMinorCompactionWithSchemaEvolutionNoBucketsMultipleReducers"; @@ -638,7 +638,7 @@ private void verifyAllContents(String tblName, TestDataProvider dataProvider, * Set to true to cause all transactions to be rolled back, until set back to false. */ private static void rollbackAllTxns(boolean val, IDriver driver) { - driver.getConf().setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, val); + driver.getConf().setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, val); } private boolean isTez(HiveConf conf){ diff --git a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java index e8ebf251297d..08626809fb8b 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestBeeLineWithArgs.java @@ -92,7 +92,7 @@ public static void preTests() throws Exception { HiveConf hiveConf = UtilsForTest.getHiveOnTezConfFromDir("../../data/conf/tez/"); hiveConf.setVar(HiveConf.ConfVars.HIVE_LOCK_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.EmbeddedLockManager"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES, false); hiveConf.set(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose"); miniHS2 = new MiniHS2(hiveConf, MiniClusterType.TEZ); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestHplSqlViaBeeLine.java b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestHplSqlViaBeeLine.java index 31c29baccf02..819a5b12b021 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestHplSqlViaBeeLine.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/beeline/TestHplSqlViaBeeLine.java @@ -27,6 +27,8 @@ import java.io.File; import java.io.FileOutputStream; import java.io.PrintStream; +import java.sql.Date; +import java.sql.Timestamp; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -54,7 +56,7 @@ public static void preTests() throws Exception { hiveConf.setVar(HiveConf.ConfVars.HIVE_LOCK_MANAGER, 
"org.apache.hadoop.hive.ql.lockmgr.EmbeddedLockManager"); hiveConf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_DEFAULT_FETCH_SIZE, 10); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES, false); hiveConf.set(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose"); miniHS2 = new MiniHS2(hiveConf, MiniHS2.MiniClusterType.TEZ); Map confOverlay = new HashMap<>(); @@ -105,7 +107,7 @@ public void testHplSqlProcedure() throws Throwable { "p1();\n" + "SELECT * FROM result;\n" + "/\n"; - testScriptFile(SCRIPT_TEXT, args(), "Hello world"); + testScriptFile(SCRIPT_TEXT, args(), "wrong number of arguments in call to 'p1'. Expected 1 got 0.", OutStream.ERR); } @Test @@ -184,8 +186,8 @@ public void testUdfBoolean() throws Throwable { "BEGIN\n" + " RETURN 'This is ' || b;\n" + "END;\n" + - "SELECT check(col_b) FROM result;\n"; - testScriptFile(SCRIPT_TEXT, args(), "This is true.*This is false"); + "SELECT check(col_b) FROM result ORDER BY col_b ASC;\n"; + testScriptFile(SCRIPT_TEXT, args(), "This is false.*This is true"); } @Test @@ -280,7 +282,7 @@ public void testUdfString() throws Throwable { "BEGIN\n" + " RETURN 'Hello ' || s || '!';\n" + "END;\n" + - "SELECT hello(col_s) FROM result;\n"; + "SELECT hello(col_s) FROM result ORDER BY col_s ASC;\n"; testScriptFile(SCRIPT_TEXT, args(), "Hello Alice!.*Hello Smith!"); } @@ -344,8 +346,8 @@ public void testUdfVarchar() throws Throwable { "BEGIN\n" + " RETURN 'Hello ' || v || '!';\n" + "END;\n" + - "SELECT hello(col_v) FROM result;\n"; - testScriptFile(SCRIPT_TEXT, args(), "Hello Smith!.*Hello Sachin!"); + "SELECT hello(col_v) FROM result ORDER BY col_v ASC;\n"; + testScriptFile(SCRIPT_TEXT, args(), "Hello Sachin!.*Hello Smith!"); } @Test @@ -360,8 +362,8 @@ public void testUdfChar() throws Throwable { "BEGIN\n" + " RETURN 'Hello ' || c || '!';\n" + "END;\n" + - "SELECT hello(col_c) FROM result;\n"; - testScriptFile(SCRIPT_TEXT, args(), "Hello Daya!.*Hello Alice!"); + "SELECT hello(col_c) FROM result ORDER BY col_c ASC;\n"; + testScriptFile(SCRIPT_TEXT, args(), "Hello Alice!.*Hello Daya!"); } @Test @@ -610,12 +612,385 @@ public void testNullCast() throws Throwable { testScriptFile(SCRIPT_TEXT, args(), "^(.(?!(NullPointerException)))*$", OutStream.ERR); } + @Test + public void testACTIVITY_COUNTHplSqlFunction() throws Throwable { + String SCRIPT_TEXT = + "DROP TABLE IF EXISTS result;\n" + + "CREATE TABLE result (col1 string);\n" + + "INSERT INTO result VALUES('Alice');\n" + + "INSERT INTO result VALUES('Bob');\n" + + "SELECT * FROM result;\n" + + "SELECT ACTIVITY_COUNT;"; + testScriptFile(SCRIPT_TEXT, args(), "2"); + } + + @Test + public void testCASTHplSqlFunction1() throws Throwable { + String SCRIPT_TEXT = "SELECT CAST('Abc' AS CHAR(1));"; + testScriptFile(SCRIPT_TEXT, args(), "A"); + } + + @Test + public void testCASTHplSqlFunction2() throws Throwable { + String SCRIPT_TEXT = "SELECT CAST(TIMESTAMP '2015-03-12 10:58:34.111' AS CHAR(10));"; + testScriptFile(SCRIPT_TEXT, args(), "2015-03-12"); + } + + @Test + public void testCHARHplSqlFunction() throws Throwable { + String SCRIPT_TEXT = "select CHAR(2023)"; + testScriptFile(SCRIPT_TEXT, args(), "2023"); + } + + @Test + public void testCOALESCEHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "select COALESCE(null,123,2023)"; + testScriptFile(SCRIPT_TEXT, args(), "123"); + } + + @Test + public void testCONCATHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = 
"select CONCAT('a', 'b', NULL, 'c')"; + testScriptFile(SCRIPT_TEXT, args(), "abc"); + } + + @Test + public void testCURRENTHplSQLFunction1() throws Throwable { + String SCRIPT_TEXT = "SELECT CURRENT DATE;"; + testCurrentDate(SCRIPT_TEXT); + } + + private void testCurrentDate(String SCRIPT_TEXT) throws Throwable { + Date today = new Date(System.currentTimeMillis()); + testScriptFile(SCRIPT_TEXT, args(), today.toString()); + } + + @Test + public void testCURRENTHplSQLFunction2() throws Throwable { + String SCRIPT_TEXT = "SELECT CURRENT TIMESTAMP;"; + testCurrentTimestamp(SCRIPT_TEXT); + } + + private void testCurrentTimestamp(String SCRIPT_TEXT) throws Throwable { + Timestamp today = new Timestamp(System.currentTimeMillis()); + String timestamp = today.toString(); + testScriptFile(SCRIPT_TEXT, args(), timestamp.substring(0, timestamp.length() - 9)); + } + + @Test + public void testCURRENTHplSQLFunction3() throws Throwable { + String SCRIPT_TEXT = "SELECT CURRENT USER;"; + testScriptFile(SCRIPT_TEXT, args(), System.getProperty("user.name")); + } + + @Test + public void testCURRENT_DATEHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT CURRENT_DATE;"; + testCurrentDate(SCRIPT_TEXT); + } + + @Test + public void testCURRENT_TIME_MILLISHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT CURRENT_TIME_MILLIS();"; + testScriptFile(SCRIPT_TEXT, args(), String.valueOf(System.currentTimeMillis() / 100000)); + } + + @Test + public void testCURRENT_TIMESTAMPHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT CURRENT_TIMESTAMP;"; + testCurrentTimestamp(SCRIPT_TEXT); + } + + @Test + public void testCURRENT_USERHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT CURRENT_USER;"; + testScriptFile(SCRIPT_TEXT, args(), System.getProperty("user.name")); + } + + @Test + public void testDATEHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT DATE('2015-03-12');"; + testScriptFile(SCRIPT_TEXT, args(), "2015-03-12"); + } + + @Test + public void testDECODEHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "DECLARE var1 INT DEFAULT 3;\n" + "SELECT DECODE (var1, 1, 'A', 2, 'B', 3, 'C');"; + testScriptFile(SCRIPT_TEXT, args(), "C"); + } + + @Test + public void testFROM_UNIXTIMEHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT from_unixtime(1447141681, 'yyyy-MM-dd');"; + testScriptFile(SCRIPT_TEXT, args(), "2015-11-"); + } + + @Test + public void testINSTRHplSQLFunction1() throws Throwable { + String SCRIPT_TEXT = "SELECT INSTR('abc', 'b');"; + testScriptFile(SCRIPT_TEXT, args(), "2"); + } + + @Test + public void testINSTRHplSQLFunction2() throws Throwable { + String SCRIPT_TEXT = "SELECT INSTR('abcabcabc', 'b', 3, 2);"; + testScriptFile(SCRIPT_TEXT, args(), "8"); + } + + @Test + public void testLOWERHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT LOWER('ABC');"; + testScriptFile(SCRIPT_TEXT, args(), "abc"); + } + + @Test + public void testLENHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT LEN('Abc ');"; + testScriptFile(SCRIPT_TEXT, args(), "3"); + } + + @Test + public void testLENGTHHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT LENGTH('Abc ');"; + testScriptFile(SCRIPT_TEXT, args(), "4"); + } + + @Test + public void testMODHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT MOD(5,2);"; + testScriptFile(SCRIPT_TEXT, args(), "1"); + } + + @Test + public void testNOWHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT NOW();"; + 
testCurrentTimestamp(SCRIPT_TEXT); + } + + @Test + public void testNVLHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT NVL(NULL, 100);"; + testScriptFile(SCRIPT_TEXT, args(), "100"); + } + + @Test + public void testNVL2HplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT NVL2(NULL, 100, 200);"; + testScriptFile(SCRIPT_TEXT, args(), "200"); + } + + @Test + public void testREPLACEHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT replace('2016-03-03', '-', '');"; + testScriptFile(SCRIPT_TEXT, args(), "20160303"); + } + + @Test + public void testSUBSTRHplSQLFunction1() throws Throwable { + String SCRIPT_TEXT = "SELECT SUBSTR('Remark', 3);"; + testScriptFile(SCRIPT_TEXT, args(), "mark"); + } + + @Test + public void testSUBSTRHplSQLFunction2() throws Throwable { + String SCRIPT_TEXT = "SELECT SUBSTR('Remark', 3, 3);"; + testScriptFile(SCRIPT_TEXT, args(), "mar"); + } + + @Test + public void testSUBSTRINGHplSQLFunction1() throws Throwable { + String SCRIPT_TEXT = "SELECT SUBSTRING('Remark', 3);"; + testScriptFile(SCRIPT_TEXT, args(), "mark"); + } + + @Test + public void testSUBSTRINGHplSQLFunction2() throws Throwable { + String SCRIPT_TEXT = "SELECT SUBSTRING('Remark', 3, 3);"; + testScriptFile(SCRIPT_TEXT, args(), "mar"); + } + + @Test + public void testSYSDATEHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT SYSDATE;"; + testCurrentTimestamp(SCRIPT_TEXT); + } + + @Test + public void testTIMESTAMP_ISOHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT TIMESTAMP_ISO('2015-03-12');"; + testScriptFile(SCRIPT_TEXT, args(), "2015-03-12 00:00:00"); + } + + @Test + public void testTO_CHARHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT TO_CHAR(CURRENT_DATE);"; + testCurrentDate(SCRIPT_TEXT); + } + + @Test + public void testTO_TIMESTAMPHplSQLFunction1() throws Throwable { + String SCRIPT_TEXT = "SELECT TO_TIMESTAMP('2015-04-02', 'YYYY-MM-DD');"; + testScriptFile(SCRIPT_TEXT, args(), "2015-04-02 00:00:00.0"); + } + + @Test + public void testTO_TIMESTAMPHplSQLFunction2() throws Throwable { + String SCRIPT_TEXT = "SELECT TO_TIMESTAMP('04/02/2015', 'mm/dd/yyyy');"; + testScriptFile(SCRIPT_TEXT, args(), "2015-04-02 00:00:00.0"); + } + + @Test + public void testTO_TIMESTAMPHplSQLFunction3() throws Throwable { + String SCRIPT_TEXT = "SELECT TO_TIMESTAMP('2015-04-02 13:51:31', 'YYYY-MM-DD HH24:MI:SS');"; + testScriptFile(SCRIPT_TEXT, args(), "2015-04-02 13:51:31.0"); + } + + @Test + public void testTRIMHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT '#' || TRIM(' Hello ') || '#';"; + testScriptFile(SCRIPT_TEXT, args(), "#Hello#"); + } + + @Test + public void testUNIX_TIMESTAMPHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT UNIX_TIMESTAMP()"; + testScriptFile(SCRIPT_TEXT, args(), String.valueOf(System.currentTimeMillis()/10000)); + } + + @Test + public void testUPPERHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT UPPER('abc');"; + testScriptFile(SCRIPT_TEXT, args(), "ABC"); + } + + @Test + public void testUSERHplSQLFunction() throws Throwable { + String SCRIPT_TEXT = "SELECT USER;"; + testScriptFile(SCRIPT_TEXT, args(), System.getProperty("user.name")); + } + + @Test + public void testTableAliasInColumnName() throws Throwable { + String SCRIPT_TEXT = + "DROP TABLE IF EXISTS input;\n" + + "DROP TABLE IF EXISTS result;\n" + + "CREATE TABLE input (col1 string, col2 int);\n" + + "CREATE TABLE result (res string);\n" + + "INSERT INTO input VALUES('Hive', 2023);\n" 
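// The procedure built in the script continuation below iterates over a cursor with FOR rec IN (...) LOOP,
// referring to columns through the table alias (tab.col1) and a column alias (num); the test asserts that both
// resolve correctly inside the loop body.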
+ + "CREATE PROCEDURE p1() AS\n" + + " BEGIN\n" + + " FOR rec IN (select tab.col1, tab.col2 num from input tab) LOOP\n" + + " INSERT INTO result VALUES(rec.num || ' = ' || rec.col1);\n" + + " END LOOP;\n" + + "END;\n" + + "p1();\n" + + "SELECT * FROM result;\n"; + testScriptFile(SCRIPT_TEXT, args(), "2023 = Hive"); + } + + @Test + public void testHplSqlProcedureCallingWithAllDefaultValues() throws Throwable { + String SCRIPT_TEXT = + "DROP TABLE IF EXISTS result;\n" + + "CREATE TABLE result (s string);\n" + + "CREATE PROCEDURE p1(s STRING DEFAULT 'default_val', num NUMBER DEFAULT 123)\n" + + "BEGIN\n" + + "INSERT INTO result VALUES(s || ' = ' || num);\n" + + "END;\n" + + "p1();\n" + + "SELECT * FROM result;" ; + testScriptFile(SCRIPT_TEXT, args(), "default_val = 123"); + } + + @Test + public void testHplSqlProcedureCallingWithSomeDefaultValues() throws Throwable { + String SCRIPT_TEXT = + "DROP TABLE IF EXISTS result;\n" + + "CREATE TABLE result (s string);\n" + + "CREATE PROCEDURE p1(s STRING DEFAULT 'default_val', num NUMBER DEFAULT 123)\n" + + "BEGIN\n" + + "INSERT INTO result VALUES(s || ' = ' || num);\n" + + "END;\n" + + "p1('Pass_Value');\n" + + "SELECT * FROM result;" ; + testScriptFile(SCRIPT_TEXT, args(), "Pass_Value = 123"); + } + + @Test + public void testHplSqlProcedureWithDefaultValues() throws Throwable { + String SCRIPT_TEXT = + "DROP TABLE IF EXISTS result;\n" + + "CREATE TABLE result (s string);\n" + + "CREATE PROCEDURE p1(s STRING DEFAULT 'default_val', num NUMBER)\n" + + "BEGIN\n" + + "INSERT INTO result VALUES(s || ' = ' || num);\n" + + "END;\n" + + "p1(111);\n" + + "SELECT * FROM result;" ; + testScriptFile(SCRIPT_TEXT, args(), "wrong number of arguments in call to 'p1'. Expected 2 got 1.", OutStream.ERR); + } + + @Test + public void testHplSqlProcedureWithSomeDefaultValues() throws Throwable { + String SCRIPT_TEXT = + "DROP TABLE IF EXISTS result;\n" + + "CREATE TABLE result (s string);\n" + + "CREATE PROCEDURE p1(s STRING, num NUMBER DEFAULT 123)\n" + + "BEGIN\n" + + "INSERT INTO result VALUES(s || ' = ' || num);\n" + + "END;\n" + + "p1('Passed_Val');\n" + + "SELECT * FROM result;" ; + testScriptFile(SCRIPT_TEXT, args(), "Passed_Val = 123"); + } + + @Test + public void testHplSqlProcedureWithDefaultParamCallingWithNamedParameterBinding() throws Throwable { + String SCRIPT_TEXT = + "DROP TABLE IF EXISTS result;\n" + + "CREATE TABLE result (s string);\n" + + "CREATE PROCEDURE p1(s STRING DEFAULT 'default_val', num NUMBER)\n" + + "BEGIN\n" + + "INSERT INTO result VALUES(s || ' = ' || num);\n" + + "END;\n" + + "p1(num => 111);\n" + + "SELECT * FROM result;" ; + testScriptFile(SCRIPT_TEXT, args(), "default_val = 111"); + } + + @Test + public void testHplSqlProcedureWithAllDefaultParamsCallingWithNamedParameterBinding() throws Throwable { + String SCRIPT_TEXT = + "DROP TABLE IF EXISTS result;\n" + + "CREATE TABLE result (s string);\n" + + "CREATE PROCEDURE p1(s1 STRING default 'Default S1', s2 string default 'Default S2')\n" + + "BEGIN\n" + + "INSERT INTO result VALUES(s1 || '=' || s2);\n" + + "END;\n" + + "p1(s2 => 'PassedValue S2');\n" + + "SELECT * FROM result;" ; + testScriptFile(SCRIPT_TEXT, args(), "Default S1=PassedValue S2"); + } + + @Test + public void testHplSqlProcedureWithoutParameters() throws Throwable { + String SCRIPT_TEXT = + "DROP TABLE IF EXISTS result;\n" + + "CREATE TABLE result (s string);\n" + + "CREATE PROCEDURE p1()\n" + + "BEGIN\n" + + "INSERT INTO result VALUES('No param');\n" + + "END;\n" + + "p1('none');\n" + + "SELECT * FROM result;" ; + 
testScriptFile(SCRIPT_TEXT, args(), "wrong number of arguments in call to 'p1'. Expected 0 got 1.", OutStream.ERR); + } + private static List args() { return Arrays.asList("-d", BeeLine.BEELINE_DEFAULT_JDBC_DRIVER, "-u", miniHS2.getBaseJdbcURL() + ";mode=hplsql", "-n", userName); } - private void testScriptFile(String scriptText, List argList, String expectedPattern) throws Throwable { testScriptFile(scriptText, argList, expectedPattern, OutStream.OUT); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java index d6f285498a1e..821f504bac22 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java @@ -205,12 +205,12 @@ public static void setUpBeforeClass() throws Exception { // Create test database and base tables once for all the test Class.forName(driverName); System.setProperty(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, "verbose"); - System.setProperty(ConfVars.HIVEMAPREDMODE.varname, "nonstrict"); + System.setProperty(ConfVars.HIVE_MAPRED_MODE.varname, "nonstrict"); System.setProperty(ConfVars.HIVE_AUTHORIZATION_MANAGER.varname, "org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider"); System.setProperty(ConfVars.HIVE_SERVER2_PARALLEL_OPS_IN_SESSION.varname, "false"); - System.setProperty(ConfVars.REPLCMENABLED.varname, "true"); - System.setProperty(ConfVars.REPLCMDIR.varname, "cmroot"); + System.setProperty(ConfVars.REPL_CM_ENABLED.varname, "true"); + System.setProperty(ConfVars.REPL_CM_DIR.varname, "cmroot"); con = getConnection(defaultDbName + ";create=true"); Statement stmt = con.createStatement(); assertNotNull("Statement is null", stmt); @@ -2100,7 +2100,7 @@ public void testSetCommand() throws SQLException { String rline = res.getString(1); assertFalse( "set output must not contain hidden variables such as the metastore password:" + rline, - rline.contains(HiveConf.ConfVars.METASTOREPWD.varname) + rline.contains(HiveConf.ConfVars.METASTORE_PWD.varname) && !(rline.contains(HiveConf.ConfVars.HIVE_CONF_HIDDEN_LIST.varname))); // the only conf allowed to have the metastore pwd keyname is the hidden list configuration // value @@ -2347,7 +2347,7 @@ public void testFetchFirstSetCmds() throws Exception { */ @Test public void testFetchFirstDfsCmds() throws Exception { - String wareHouseDir = conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname); + String wareHouseDir = conf.get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname); execFetchFirst("dfs -ls " + wareHouseDir, DfsProcessor.DFS_RESULT_HEADER, false); } diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java index 2436f5f9d8f1..7f03c0ee0c5f 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java @@ -67,7 +67,6 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.metastore.PersistenceManagerProvider; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; @@ -85,7 +84,6 @@ import org.junit.AfterClass; import org.junit.Assert; import 
org.junit.BeforeClass; -import org.junit.Ignore; import org.junit.Test; import static org.apache.hadoop.hive.common.repl.ReplConst.SOURCE_OF_REPLICATION; @@ -217,9 +215,9 @@ private static void startMiniHS2(HiveConf conf) throws Exception { private static void startMiniHS2(HiveConf conf, boolean httpMode) throws Exception { conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED, false); - conf.setBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER, false); + conf.setBoolVar(ConfVars.HIVE_STATS_COL_AUTOGATHER, false); // store post-exec hooks calls so we can look at them later - conf.setVar(ConfVars.POSTEXECHOOKS, ReadableHook.class.getName() + "," + + conf.setVar(ConfVars.POST_EXEC_HOOKS, ReadableHook.class.getName() + "," + LineageLogger.class.getName()); MiniHS2.Builder builder = new MiniHS2.Builder().withConf(conf).cleanupLocalDirOnStartup(false); if (httpMode) { @@ -807,15 +805,15 @@ public void testSessionScratchDirs() throws Exception { // FS FileSystem fs = miniHS2.getLocalFS(); FsPermission expectedFSPermission = new FsPermission(HiveConf.getVar(conf, - HiveConf.ConfVars.SCRATCHDIRPERMISSION)); + HiveConf.ConfVars.SCRATCH_DIR_PERMISSION)); // Verify scratch dir paths and permission // HDFS scratch dir - scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR) + "/" + userName); + scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR) + "/" + userName); verifyScratchDir(conf, fs, scratchDirPath, expectedFSPermission, userName, false); // Local scratch dir - scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR)); + scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR)); verifyScratchDir(conf, fs, scratchDirPath, expectedFSPermission, userName, true); // Downloaded resources dir @@ -831,11 +829,11 @@ public void testSessionScratchDirs() throws Exception { // Verify scratch dir paths and permission // HDFS scratch dir - scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR) + "/" + userName); + scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR) + "/" + userName); verifyScratchDir(conf, fs, scratchDirPath, expectedFSPermission, userName, false); // Local scratch dir - scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR)); + scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR)); verifyScratchDir(conf, fs, scratchDirPath, expectedFSPermission, userName, true); // Downloaded resources dir @@ -982,7 +980,7 @@ public void testRootScratchDir() throws Exception { FsPermission expectedFSPermission = new FsPermission((short)00733); // Verify scratch dir paths and permission // HDFS scratch dir - scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR)); + scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR)); verifyScratchDir(conf, fs, scratchDirPath, expectedFSPermission, userName, false); conn.close(); @@ -992,7 +990,7 @@ public void testRootScratchDir() throws Exception { conf.set("hive.exec.scratchdir", tmpDir + "/level1/level2/level3"); startMiniHS2(conf); conn = getConnection(miniHS2.getJdbcURL(testDbName), userName, "password"); - scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR)); + scratchDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR)); verifyScratchDir(conf, fs, scratchDirPath, expectedFSPermission, userName, false); 
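// Even after the root scratch dir is relocated to the nested path (tmpDir + "/level1/level2/level3" above),
// it is verified against the same expected permission, new FsPermission((short) 00733), as the default one.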
conn.close(); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestRestrictedList.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestRestrictedList.java index aeec57757c21..a9c49efc54d3 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestRestrictedList.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestRestrictedList.java @@ -109,6 +109,8 @@ public static void startServices() throws Exception { addToExpectedRestrictedMap("hive.zookeeper.ssl.truststore.location"); addToExpectedRestrictedMap("hive.zookeeper.ssl.truststore.password"); addToExpectedRestrictedMap("hive.zookeeper.ssl.truststore.type"); + addToExpectedRestrictedMap("hive.iceberg.allow.datafiles.in.table.location.only"); + addToExpectedRestrictedMap("hive.rewrite.data.policy"); checkRestrictedListMatch(); } diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/cbo_rp_TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/cbo_rp_TestJdbcDriver2.java index b43a1b7586de..1942dc194f13 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/cbo_rp_TestJdbcDriver2.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/cbo_rp_TestJdbcDriver2.java @@ -58,7 +58,6 @@ import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer; import org.apache.hadoop.hive.ql.processors.DfsProcessor; import org.apache.hive.common.util.HiveVersionInfo; import org.apache.hive.jdbc.Utils.JdbcConnectionParams; @@ -2059,7 +2058,7 @@ public void testFetchFirstSetCmds() throws Exception { */ @Test public void testFetchFirstDfsCmds() throws Exception { - String wareHouseDir = conf.get(HiveConf.ConfVars.METASTOREWAREHOUSE.varname); + String wareHouseDir = conf.get(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname); execFetchFirst("dfs -ls " + wareHouseDir, DfsProcessor.DFS_RESULT_HEADER, false); } diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java b/itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java index f088bc651c9f..6321e049f5cb 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/service/TestDFSErrorHandling.java @@ -29,10 +29,8 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.shims.HadoopShims.MiniDFSShim; import org.apache.hive.jdbc.miniHS2.MiniHS2; -import org.apache.hive.service.cli.HiveSQLException; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -98,7 +96,7 @@ public void testAccessDenied() throws Exception { miniHS2 != null && miniHS2.isStarted()); Class.forName(MiniHS2.getJdbcDriverName()); - Path scratchDir = new Path(HiveConf.getVar(hiveConf, HiveConf.ConfVars.SCRATCHDIR)); + Path scratchDir = new Path(HiveConf.getVar(hiveConf, HiveConf.ConfVars.SCRATCH_DIR)); MiniDFSShim dfs = miniHS2.getDfs(); FileSystem fs = dfs.getFileSystem(); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/auth/jwt/TestHttpJwtAuthentication.java b/itests/hive-unit/src/test/java/org/apache/hive/service/auth/jwt/TestHttpJwtAuthentication.java index 202ff0d481cf..7aa7adb67e98 100644 --- 
a/itests/hive-unit/src/test/java/org/apache/hive/service/auth/jwt/TestHttpJwtAuthentication.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/service/auth/jwt/TestHttpJwtAuthentication.java @@ -116,7 +116,7 @@ public static void setupHS2() throws Exception { HiveConf conf = new HiveConf(); conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED, false); - conf.setBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER, false); + conf.setBoolVar(ConfVars.HIVE_STATS_COL_AUTOGATHER, false); conf.setVar(ConfVars.HIVE_SERVER2_AUTHENTICATION, "JWT"); // the content of the URL below is the same as jwtVerificationJWKSFile conf.setVar(ConfVars.HIVE_SERVER2_AUTHENTICATION_JWT_JWKS_URL, "http://localhost:" + MOCK_JWKS_SERVER_PORT + diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/auth/saml/TestHttpSamlAuthentication.java b/itests/hive-unit/src/test/java/org/apache/hive/service/auth/saml/TestHttpSamlAuthentication.java index d71fdedf7ea9..7d119e9372c2 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/service/auth/saml/TestHttpSamlAuthentication.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/service/auth/saml/TestHttpSamlAuthentication.java @@ -33,9 +33,7 @@ import java.io.File; import java.io.IOException; import java.net.InetAddress; -import java.net.InetSocketAddress; import java.net.ServerSocket; -import java.net.Socket; import java.nio.charset.StandardCharsets; import java.sql.ResultSet; import java.sql.SQLException; @@ -53,16 +51,13 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.MetaStoreTestUtils; -import org.apache.hadoop.hive.ql.metadata.TestHive; import org.apache.hive.jdbc.HiveConnection; import org.apache.hive.jdbc.Utils.JdbcConnectionParams; import org.apache.hive.jdbc.miniHS2.MiniHS2; -import org.apache.hive.jdbc.saml.HiveJdbcBrowserClient; import org.apache.hive.jdbc.saml.IJdbcBrowserClient; import org.apache.hive.jdbc.saml.IJdbcBrowserClient.HiveJdbcBrowserException; import org.apache.hive.jdbc.saml.IJdbcBrowserClientFactory; import org.apache.hive.jdbc.saml.SimpleSAMLPhpTestBrowserClient; -import org.apache.thrift.TException; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -108,7 +103,7 @@ public static void setupHS2() throws Exception { HiveConf conf = new HiveConf(); conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setBoolVar(ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED, false); - conf.setBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER, false); + conf.setBoolVar(ConfVars.HIVE_STATS_COL_AUTOGATHER, false); conf.setVar(ConfVars.HIVE_SERVER2_AUTHENTICATION, "SAML"); conf.setVar(ConfVars.HIVE_SERVER2_SAML_IDP_METADATA, idpMetadataFile.getAbsolutePath()); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java index 819838d09120..850fa243d9da 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/TestEmbeddedThriftBinaryCLIService.java @@ -39,7 +39,7 @@ public static void setUpBeforeClass() throws Exception { service = new EmbeddedThriftBinaryCLIService(); HiveConf conf = new HiveConf(); conf.setBoolean("datanucleus.schema.autoCreateTables", true); - 
conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); + conf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); UtilsForTest.expandHiveConfParams(conf); service.init(conf); client = new ThriftCLIServiceClient(service); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithMr.java b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithMr.java index c7dade3874a9..66325b128c9c 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithMr.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/service/cli/operation/TestOperationLoggingAPIWithMr.java @@ -30,6 +30,7 @@ import org.apache.hive.service.cli.RowSet; import org.junit.Assert; import org.junit.BeforeClass; +import org.junit.Ignore; import org.junit.Test; /** @@ -127,6 +128,7 @@ public void testFetchResultsOfLogAsync() throws Exception { } @Test + @Ignore("HIVE-27966") public void testFetchResultsOfLogWithOrientation() throws Exception { // (FETCH_FIRST) execute a sql, and fetch its sql operation log as expected value OperationHandle operationHandle = client.executeStatement(sessionHandle, sql, null); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestGracefulStopHS2.java b/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestGracefulStopHS2.java index 164a63315e41..983e8afd6cd9 100755 --- a/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestGracefulStopHS2.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestGracefulStopHS2.java @@ -51,7 +51,7 @@ public static void setupBeforeClass() throws Exception { HiveConf conf = new HiveConf(); conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_LOGGING_OPERATION_ENABLED, false); - conf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false); + conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, false); conf.setTimeVar(HiveConf.ConfVars.HIVE_SERVER2_GRACEFUL_STOP_TIMEOUT, 60, TimeUnit.SECONDS); MiniHS2.Builder builder = new MiniHS2.Builder().withConf(conf).cleanupLocalDirOnStartup(false); miniHS2 = builder.build(); diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestHS2ClearDanglingScratchDir.java b/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestHS2ClearDanglingScratchDir.java index 1adfdebdf2be..ff36e24b2d13 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestHS2ClearDanglingScratchDir.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestHS2ClearDanglingScratchDir.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.shims.Utils; -import org.apache.hadoop.util.Shell; import org.junit.Assert; import org.junit.Test; @@ -40,13 +39,13 @@ public void testScratchDirCleared() throws Exception { conf.set(HiveConf.ConfVars.HIVE_SCRATCH_DIR_LOCK.toString(), "true"); conf.set(HiveConf.ConfVars.HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR.toString(), "true"); - Path scratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR)); + Path scratchDir = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR)); m_dfs.getFileSystem().mkdirs(scratchDir); m_dfs.getFileSystem().setPermission(scratchDir, new FsPermission("777")); // Fake two live session SessionState.start(conf); - 
conf.setVar(HiveConf.ConfVars.HIVESESSIONID, UUID.randomUUID().toString()); + conf.setVar(HiveConf.ConfVars.HIVE_SESSION_ID, UUID.randomUUID().toString()); SessionState.start(conf); // Fake dead session diff --git a/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestHS2SessionHive.java b/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestHS2SessionHive.java index 15cfb190d5e0..79ddd77556cd 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestHS2SessionHive.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/service/server/TestHS2SessionHive.java @@ -186,7 +186,7 @@ public static void setupBeforeClass() throws Exception { throw e; } - miniHS2.getHiveConf().setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + miniHS2.getHmsPort()); + miniHS2.getHiveConf().setVar(HiveConf.ConfVars.METASTORE_URIS, "thrift://localhost:" + miniHS2.getHmsPort()); try (Connection conn = DriverManager. getConnection(miniHS2.getJdbcURL(), System.getProperty("user.name"), ""); diff --git a/itests/pom.xml b/itests/pom.xml index 6f2cdad408d4..45aa288ab816 100644 --- a/itests/pom.xml +++ b/itests/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-it diff --git a/itests/qtest-accumulo/pom.xml b/itests/qtest-accumulo/pom.xml index 160de3db6552..0271550172e5 100644 --- a/itests/qtest-accumulo/pom.xml +++ b/itests/qtest-accumulo/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive-it - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-it-qfile-accumulo diff --git a/itests/qtest-druid/pom.xml b/itests/qtest-druid/pom.xml index f8fbeb9c3ca7..ccd59a385235 100644 --- a/itests/qtest-druid/pom.xml +++ b/itests/qtest-druid/pom.xml @@ -21,7 +21,7 @@ hive-it org.apache.hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-it-druid @@ -33,7 +33,7 @@ 1.15.0 4.0.0 1.19.3 - 9.4.40.v20210413 + 9.4.45.v20220203 10.11.1.1 16.0.1 4.1.0 diff --git a/itests/qtest-iceberg/pom.xml b/itests/qtest-iceberg/pom.xml index 1a7ad8be74ac..d93ee09251e6 100644 --- a/itests/qtest-iceberg/pom.xml +++ b/itests/qtest-iceberg/pom.xml @@ -3,7 +3,7 @@ hive-it org.apache.hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml 4.0.0 diff --git a/itests/qtest-iceberg/src/test/java/org/apache/hadoop/hive/cli/TestIcebergLlapLocalCompactorCliDriver.java b/itests/qtest-iceberg/src/test/java/org/apache/hadoop/hive/cli/TestIcebergLlapLocalCompactorCliDriver.java new file mode 100644 index 000000000000..795cc3cc09fb --- /dev/null +++ b/itests/qtest-iceberg/src/test/java/org/apache/hadoop/hive/cli/TestIcebergLlapLocalCompactorCliDriver.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.cli; + +import java.io.File; +import java.util.List; + +import org.apache.hadoop.hive.cli.control.CliAdapter; +import org.apache.hadoop.hive.cli.control.CliConfigs; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestRule; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.ql.txn.compactor.Worker; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import java.util.concurrent.atomic.AtomicBoolean; + +@RunWith(Parameterized.class) +public class TestIcebergLlapLocalCompactorCliDriver { + + static CliAdapter adapter = new CliConfigs.IcebergLlapLocalCompactorCliConfig().getCliAdapter(); + private static final AtomicBoolean stop = new AtomicBoolean(); + private static Worker worker; + + @Parameters(name ="{0}") + public static List getParameters() throws Exception { + return adapter.getParameters(); + } + + @BeforeClass + public static void setup() throws Exception { + worker = new Worker(); + worker.setConf(SessionState.get().getConf()); + stop.set(false); + worker.init(stop); + worker.start(); + } + + @AfterClass + public static void tearDown(){ + stop.set(true); + } + + @ClassRule + public static TestRule cliClassRule = adapter.buildClassRule(); + + @Rule + public TestRule cliTestRule = adapter.buildTestRule(); + + private String name; + private File qfile; + + public TestIcebergLlapLocalCompactorCliDriver(String name, File qfile) { + this.name = name; + this.qfile = qfile; + } + + @Test + public void testCliDriver() throws Exception { + adapter.runTest(name, qfile); + } +} diff --git a/itests/qtest-kudu/pom.xml b/itests/qtest-kudu/pom.xml index 9924b0c1530d..222fdf99fdc5 100644 --- a/itests/qtest-kudu/pom.xml +++ b/itests/qtest-kudu/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive-it - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-it-qfile-kudu diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml index 30a6864cef56..941c9e2e1892 100644 --- a/itests/qtest/pom.xml +++ b/itests/qtest/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive-it - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-it-qfile diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties index 367b922d1300..290f4e3acad5 100644 --- a/itests/src/test/resources/testconfiguration.properties +++ b/itests/src/test/resources/testconfiguration.properties @@ -56,6 +56,7 @@ minillap.query.files=\ cmv_direct.q,\ cmv_direct_with_specified_locations.q,\ cmv_direct_with_suffixed_locations.q,\ + complex_datatype.q,\ create_genericudaf.q,\ create_table.q,\ create_udaf.q,\ @@ -416,6 +417,13 @@ iceberg.llap.query.files=\ vectorized_iceberg_read_orc.q,\ vectorized_iceberg_read_parquet.q +iceberg.llap.query.compactor.files=\ + iceberg_major_compaction_partition_evolution.q,\ + iceberg_major_compaction_partitioned.q,\ + iceberg_major_compaction_schema_evolution.q,\ + iceberg_major_compaction_unpartitioned.q,\ + iceberg_optimize_table_unpartitioned.q + iceberg.llap.only.query.files=\ llap_iceberg_read_orc.q,\ llap_iceberg_read_parquet.q diff --git a/itests/test-serde/pom.xml b/itests/test-serde/pom.xml index 5d30c102713d..96ef671e767d 100644 --- a/itests/test-serde/pom.xml +++ b/itests/test-serde/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive-it - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml 
hive-it-test-serde diff --git a/itests/util/pom.xml b/itests/util/pom.xml index 97ebb9c83aa2..f2e9c2797dcd 100644 --- a/itests/util/pom.xml +++ b/itests/util/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive-it - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-it-util @@ -103,10 +103,6 @@ hive-exec tests - - org.apache.hive - hive-jdbc - org.apache.hive hive-standalone-metastore-common diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java index 7288eaeb9691..5705e5f38b77 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/cli/control/CliConfigs.java @@ -639,6 +639,7 @@ public IcebergCliConfig() { try { setQueryDir("iceberg/iceberg-handler/src/test/queries/positive"); excludesFrom(testConfigProps, "iceberg.llap.only.query.files"); + excludesFrom(testConfigProps, "iceberg.llap.query.compactor.files"); setResultsDir("iceberg/iceberg-handler/src/test/results/positive"); setLogDir("itests/qtest/target/qfile-results/iceberg-handler/positive"); @@ -691,4 +692,27 @@ public IcebergLlapLocalCliConfig() { } } } + + public static class IcebergLlapLocalCompactorCliConfig extends AbstractCliConfig { + + public IcebergLlapLocalCompactorCliConfig() { + super(CoreCliDriver.class); + try { + setQueryDir("iceberg/iceberg-handler/src/test/queries/positive"); + + includesFrom(testConfigProps, "iceberg.llap.query.compactor.files"); + + setResultsDir("iceberg/iceberg-handler/src/test/results/positive/llap"); + setLogDir("itests/qtest/target/qfile-results/iceberg-handler/positive"); + + setInitScript("q_test_init_tez.sql"); + setCleanupScript("q_test_cleanup_tez.sql"); + + setHiveConfDir("data/conf/iceberg/llap"); + setClusterType(MiniClusterType.LLAP_LOCAL); + } catch (Exception e) { + throw new RuntimeException("can't contruct cliconfig", e); + } + } + } } diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QOutProcessor.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QOutProcessor.java index 5d199ce0239c..da872f17cfcc 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QOutProcessor.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QOutProcessor.java @@ -337,6 +337,10 @@ private final static class PatternReplacementPair { ppm.add(new PatternReplacementPair(Pattern.compile("vertex_[0-9_]+"), "vertex_#ID#")); ppm.add(new PatternReplacementPair(Pattern.compile("task_[0-9_]+"), "task_#ID#")); + // since TEZ-4506, the node is reported with task attempt failures, which needs to be masked + ppm.add(new PatternReplacementPair(Pattern.compile("Error: Node: (.*) : Error while running task"), + "Error: Node: #NODE# : Error while running task")); + ppm.add(new PatternReplacementPair(Pattern.compile("rowcount = [0-9]+(\\.[0-9]+(E[0-9]+)?)?, cumulative cost = \\{.*\\}, id = [0-9]*"), "rowcount = ###Masked###, cumulative cost = ###Masked###, id = ###Masked###")); diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMetaStoreHandler.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMetaStoreHandler.java index 184e56f158c1..e8827bda9007 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMetaStoreHandler.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMetaStoreHandler.java @@ -60,7 +60,7 @@ public boolean isDerby() { } public QTestMetaStoreHandler setMetaStoreConfiguration(HiveConf conf) { - 
conf.setVar(ConfVars.METASTOREDBTYPE, getDbTypeConfString()); + conf.setVar(ConfVars.METASTORE_DB_TYPE, getDbTypeConfString()); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY, rule.getJdbcUrl()); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_DRIVER, rule.getJdbcDriver()); diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMiniClusters.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMiniClusters.java index 38530f047e3f..e94a842d7636 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMiniClusters.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestMiniClusters.java @@ -420,7 +420,7 @@ private void createRemoteDirs(HiveConf conf) { // Create remote dirs once. if (getMr() != null) { assert fs != null; - Path warehousePath = fs.makeQualified(new Path(conf.getVar(ConfVars.METASTOREWAREHOUSE))); + Path warehousePath = fs.makeQualified(new Path(conf.getVar(ConfVars.METASTORE_WAREHOUSE))); assert warehousePath != null; Path hiveJarPath = fs.makeQualified(new Path(conf.getVar(ConfVars.HIVE_JAR_DIRECTORY))); assert hiveJarPath != null; @@ -570,13 +570,13 @@ private void setFsRelatedProperties(HiveConf conf, boolean isLocalFs, FileSystem conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, fsUriString); // Remote dirs - conf.setVar(ConfVars.METASTOREWAREHOUSE, warehousePath.toString()); + conf.setVar(ConfVars.METASTORE_WAREHOUSE, warehousePath.toString()); conf.setVar(ConfVars.HIVE_JAR_DIRECTORY, jarPath.toString()); conf.setVar(ConfVars.HIVE_USER_INSTALL_DIR, userInstallPath.toString()); - // ConfVars.SCRATCHDIR - {test.tmp.dir}/scratchdir + // ConfVars.SCRATCH_DIR - {test.tmp.dir}/scratchdir // Local dirs - // ConfVars.LOCALSCRATCHDIR - {test.tmp.dir}/localscratchdir + // ConfVars.LOCAL_SCRATCH_DIR - {test.tmp.dir}/localscratchdir // TODO Make sure to cleanup created dirs. } diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java index e9c86372bc98..180c6e70d6b6 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java @@ -22,7 +22,6 @@ import java.io.BufferedOutputStream; import java.io.File; -import java.util.LinkedHashSet; import java.io.FileNotFoundException; import java.io.FileOutputStream; import java.io.FileWriter; @@ -570,7 +569,7 @@ public void postInit() throws Exception { sem = new SemanticAnalyzer(new QueryState.Builder().withHiveConf(conf).build()); - testWarehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE); + testWarehouse = conf.getVar(HiveConf.ConfVars.METASTORE_WAREHOUSE); db = Hive.get(conf); pd = new ParseDriver(); @@ -750,7 +749,7 @@ private CommandProcessorResponse executeClientInternal(String commands) throws C * if you want to use another hive cmd after the failure to sanity check the state of the system. */ private boolean ignoreErrors() { - return conf.getBoolVar(HiveConf.ConfVars.CLIIGNOREERRORS); + return conf.getBoolVar(HiveConf.ConfVars.CLI_IGNORE_ERRORS); } boolean isHiveCommand(String command) { @@ -775,7 +774,7 @@ private CommandProcessorResponse executeTestCommand(String command) throws Comma //replace ${hiveconf:hive.metastore.warehouse.dir} with actual dir if existed. 
//we only want the absolute path, so remove the header, such as hdfs://localhost:57145 String wareHouseDir = - SessionState.get().getConf().getVar(ConfVars.METASTOREWAREHOUSE).replaceAll("^[a-zA-Z]+://.*?:\\d+", ""); + SessionState.get().getConf().getVar(ConfVars.METASTORE_WAREHOUSE).replaceAll("^[a-zA-Z]+://.*?:\\d+", ""); commandArgs = commandArgs.replaceAll("\\$\\{hiveconf:hive\\.metastore\\.warehouse\\.dir\\}", wareHouseDir); if (SessionState.get() != null) { diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/MapJoinCounterHook.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/MapJoinCounterHook.java index 43cb8c9615cf..6e5262213444 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/MapJoinCounterHook.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/hooks/MapJoinCounterHook.java @@ -30,7 +30,7 @@ public class MapJoinCounterHook implements ExecuteWithHookContext { public void run(HookContext hookContext) { HiveConf conf = hookContext.getConf(); - boolean enableConvert = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVECONVERTJOIN); + boolean enableConvert = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CONVERT_JOIN); if (!enableConvert) { return; } diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java index a4ac92b62570..e705c12f8db7 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsAggregator.java @@ -34,7 +34,7 @@ public class DummyStatsAggregator implements StatsAggregator { // denotes the method which needs to throw an error. @Override public boolean connect(StatsCollectionContext scc) { - errorMethod = HiveConf.getVar(scc.getHiveConf(), HiveConf.ConfVars.HIVETESTMODEDUMMYSTATAGGR); + errorMethod = HiveConf.getVar(scc.getHiveConf(), HiveConf.ConfVars.HIVE_TEST_MODE_DUMMY_STAT_AGGR); if (errorMethod.equalsIgnoreCase("connect")) { return false; } diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java b/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java index cc80fc2b3e6a..41e475fd7a17 100644 --- a/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java +++ b/itests/util/src/main/java/org/apache/hadoop/hive/ql/stats/DummyStatsPublisher.java @@ -37,7 +37,7 @@ public class DummyStatsPublisher implements StatsPublisher { // denotes the method which needs to throw an error. 
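// A minimal sketch of how this dummy publisher is assumed to be driven from tests: setting
//   conf.setVar(HiveConf.ConfVars.HIVE_TEST_MODE_DUMMY_STAT_PUB, "connect");
// makes connect() below return false while init() still succeeds, so each stats-publishing phase can be made
// to fail independently when exercising error handling.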
@Override public boolean init(StatsCollectionContext context) { - errorMethod = HiveConf.getVar(context.getHiveConf(), HiveConf.ConfVars.HIVETESTMODEDUMMYSTATPUB); + errorMethod = HiveConf.getVar(context.getHiveConf(), HiveConf.ConfVars.HIVE_TEST_MODE_DUMMY_STAT_PUB); if (errorMethod.equalsIgnoreCase("init")) { return false; } @@ -47,7 +47,7 @@ public boolean init(StatsCollectionContext context) { @Override public boolean connect(StatsCollectionContext context) { - errorMethod = HiveConf.getVar(context.getHiveConf(), HiveConf.ConfVars.HIVETESTMODEDUMMYSTATPUB); + errorMethod = HiveConf.getVar(context.getHiveConf(), HiveConf.ConfVars.HIVE_TEST_MODE_DUMMY_STAT_PUB); if (errorMethod.equalsIgnoreCase("connect")) { return false; } diff --git a/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/AbstractHiveService.java b/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/AbstractHiveService.java index d21b76418607..069d58c6a0b0 100644 --- a/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/AbstractHiveService.java +++ b/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/AbstractHiveService.java @@ -90,13 +90,13 @@ public void clearProperties() { * @return */ public Path getWareHouseDir() { - return new Path(hiveConf.getVar(ConfVars.METASTOREWAREHOUSE)); + return new Path(hiveConf.getVar(ConfVars.METASTORE_WAREHOUSE)); } public void setWareHouseDir(String wareHouseURI) { verifyNotStarted(); - System.setProperty(ConfVars.METASTOREWAREHOUSE.varname, wareHouseURI); - hiveConf.setVar(ConfVars.METASTOREWAREHOUSE, wareHouseURI); + System.setProperty(ConfVars.METASTORE_WAREHOUSE.varname, wareHouseURI); + hiveConf.setVar(ConfVars.METASTORE_WAREHOUSE, wareHouseURI); } /** diff --git a/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java b/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java index 9e95d3b2db92..8076a0718ed7 100644 --- a/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java +++ b/itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java @@ -348,12 +348,12 @@ private MiniHS2(HiveConf hiveConf, MiniClusterType miniClusterType, boolean useM Path scratchDir = new Path(baseFsDir, "scratch"); // Create root scratchdir with write all, so that user impersonation has no issues. 
Utilities.createDirsWithPermission(hiveConf, scratchDir, WRITE_ALL_PERM, true); - System.setProperty(HiveConf.ConfVars.SCRATCHDIR.varname, scratchDir.toString()); - hiveConf.setVar(ConfVars.SCRATCHDIR, scratchDir.toString()); + System.setProperty(HiveConf.ConfVars.SCRATCH_DIR.varname, scratchDir.toString()); + hiveConf.setVar(ConfVars.SCRATCH_DIR, scratchDir.toString()); String localScratchDir = baseDir.getPath() + File.separator + "scratch"; - System.setProperty(HiveConf.ConfVars.LOCALSCRATCHDIR.varname, localScratchDir); - hiveConf.setVar(ConfVars.LOCALSCRATCHDIR, localScratchDir); + System.setProperty(HiveConf.ConfVars.LOCAL_SCRATCH_DIR.varname, localScratchDir); + hiveConf.setVar(ConfVars.LOCAL_SCRATCH_DIR, localScratchDir); } public MiniHS2(HiveConf hiveConf) throws Exception { diff --git a/jdbc-handler/pom.xml b/jdbc-handler/pom.xml index 20705c1d69bf..11fb20a09a95 100644 --- a/jdbc-handler/pom.xml +++ b/jdbc-handler/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-jdbc-handler diff --git a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/conf/JdbcStorageConfigManager.java b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/conf/JdbcStorageConfigManager.java index f7f3dd65ddf4..94cd398a9d41 100644 --- a/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/conf/JdbcStorageConfigManager.java +++ b/jdbc-handler/src/main/java/org/apache/hive/storage/jdbc/conf/JdbcStorageConfigManager.java @@ -192,11 +192,11 @@ private static void resolveMetadata(Properties props) throws HiveException, IOEx } private static String getMetastoreDatabaseType(HiveConf conf) { - return conf.getVar(HiveConf.ConfVars.METASTOREDBTYPE); + return conf.getVar(HiveConf.ConfVars.METASTORE_DB_TYPE); } private static String getMetastoreConnectionURL(HiveConf conf) { - return conf.getVar(HiveConf.ConfVars.METASTORECONNECTURLKEY); + return conf.getVar(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY); } private static String getMetastoreDriver(HiveConf conf) { @@ -209,6 +209,6 @@ private static String getMetastoreJdbcUser(HiveConf conf) { private static String getMetastoreJdbcPasswd(HiveConf conf) throws IOException { return ShimLoader.getHadoopShims().getPassword(conf, - HiveConf.ConfVars.METASTOREPWD.varname); + HiveConf.ConfVars.METASTORE_PWD.varname); } } diff --git a/jdbc/pom.xml b/jdbc/pom.xml index b9080be1898d..100783f92404 100644 --- a/jdbc/pom.xml +++ b/jdbc/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-jdbc @@ -82,6 +82,14 @@ org.apache.zookeeper zookeeper + + ch.qos.logback + logback-classic + + + ch.qos.logback + logback-core + io.netty * diff --git a/kafka-handler/pom.xml b/kafka-handler/pom.xml index 43d72065fc6a..978c7aba7f87 100644 --- a/kafka-handler/pom.xml +++ b/kafka-handler/pom.xml @@ -20,7 +20,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml 4.0.0 @@ -121,6 +121,16 @@ org.apache.zookeeper zookeeper test + + + ch.qos.logback + logback-classic + + + ch.qos.logback + logback-core + + io.confluent diff --git a/kafka-handler/src/java/org/apache/hadoop/hive/kafka/KafkaOutputFormat.java b/kafka-handler/src/java/org/apache/hadoop/hive/kafka/KafkaOutputFormat.java index 1ddda8e6992c..19048c1da426 100644 --- a/kafka-handler/src/java/org/apache/hadoop/hive/kafka/KafkaOutputFormat.java +++ b/kafka-handler/src/java/org/apache/hadoop/hive/kafka/KafkaOutputFormat.java @@ -67,7 +67,7 @@ public class KafkaOutputFormat implements HiveOutputFormat> getRecords(String 
topic) { + return IntStream.range(0, RECORD_NUMBER).mapToObj(number -> { + final byte[] value = ("VALUE-" + Integer.toString(number)).getBytes(Charset.forName("UTF-8")); + return new ConsumerRecord<>(topic, 0, (long) number, 0L, null, 0L, 0, 0, KEY_BYTES, value); + }).collect(Collectors.toList()); + } + + private KafkaConsumer consumer; + + private Properties consumerProps; + + private static final String RESULT = "Partition(topic = TEST1, partition = 0, leader = 0, replicas = [0], isr = [0]," + + " offlineReplicas = []) [start offset = [0], end offset = [" + RECORD_NUMBER + "]]"; + + @BeforeClass + public static void setupCluster() throws Throwable { + KAFKA_BROKER_RESOURCE.before(); + sendData(); + } + + @AfterClass + public static void tearDownCluster() { + KAFKA_BROKER_RESOURCE.deleteTopic(TOPIC); + KAFKA_BROKER_RESOURCE.after(); + } + + @Before + public void setUp() { + setupConsumer(); + } + + @After + public void tearDown() { + consumer.close(); + } + + private void setupConsumer() { + consumerProps = new Properties(); + consumerProps.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); + consumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none"); + consumerProps.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaBrokerResource.BROKER_IP_PORT); + consumerProps.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName()); + consumerProps.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName()); + //The configuration values are not default and are given randomly , these can be changed and tested if required + consumerProps.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, "3002"); + consumerProps.setProperty(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "3001"); + consumerProps.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "3001"); + consumerProps.setProperty(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "100"); + LOG.info("setting up kafka consumer with props {}", consumerProps); + this.consumer = new KafkaConsumer<>(consumerProps); + } + + @Test + public void testFormatAsText() { + KafkaStorageHandlerInfo storageHandlerInfo = new KafkaStorageHandlerInfo(TOPIC, consumerProps); + Assert.assertEquals(String.class, storageHandlerInfo.formatAsText().getClass()); + String text = storageHandlerInfo.formatAsText(); + Assert.assertEquals(RESULT, text); + } + + private static void sendData() { + List> RECORDS = getRecords(TOPIC); + Properties producerProps = new Properties(); + producerProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaBrokerResource.BROKER_IP_PORT); + producerProps.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, + "org.apache.kafka.common.serialization.ByteArraySerializer"); + producerProps.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, + "org.apache.kafka.common.serialization.ByteArraySerializer"); + LOG.info("Setting up kafka producer with props {}", producerProps); + + KafkaProducer producer = new KafkaProducer<>(producerProps); + LOG.info("Kafka producer started \n Sending [{}] records", RECORDS.size()); + RECORDS.stream() + .map(consumerRecord -> new ProducerRecord<>(consumerRecord.topic(), + consumerRecord.partition(), + consumerRecord.timestamp(), + consumerRecord.key(), + consumerRecord.value())) + .forEach(producer::send); + producer.close(); + } +} diff --git a/kafka-handler/src/test/org/apache/hadoop/hive/kafka/TransactionalKafkaWriterTest.java 
b/kafka-handler/src/test/org/apache/hadoop/hive/kafka/TransactionalKafkaWriterTest.java index 07a3b5a37fea..760266a427fb 100644 --- a/kafka-handler/src/test/org/apache/hadoop/hive/kafka/TransactionalKafkaWriterTest.java +++ b/kafka-handler/src/test/org/apache/hadoop/hive/kafka/TransactionalKafkaWriterTest.java @@ -118,7 +118,7 @@ public TransactionalKafkaWriterTest() throws IOException { temporaryFolder.create(); Path tableLocation = new Path(temporaryFolder.newFolder().toURI()); queryWorkingPath = new Path(tableLocation, queryId); - configuration.set(HiveConf.ConfVars.HIVEQUERYID.varname, queryId); + configuration.set(HiveConf.ConfVars.HIVE_QUERY_ID.varname, queryId); String taskId = "attempt_m_0001_0"; configuration.set("mapred.task.id", taskId); configuration.set(KafkaTableProperties.HIVE_KAFKA_BOOTSTRAP_SERVERS.getName(), KafkaBrokerResource.BROKER_IP_PORT); diff --git a/kudu-handler/pom.xml b/kudu-handler/pom.xml index faf94f51911b..c681762fce92 100644 --- a/kudu-handler/pom.xml +++ b/kudu-handler/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-kudu-handler diff --git a/llap-client/pom.xml b/llap-client/pom.xml index 65e8fa22c7b8..9215f68c5aee 100644 --- a/llap-client/pom.xml +++ b/llap-client/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-llap-client @@ -55,6 +55,14 @@ org.apache.zookeeper zookeeper + + ch.qos.logback + logback-classic + + + ch.qos.logback + logback-core + log4j log4j diff --git a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java index 25cb89a37567..1c4df16b7643 100644 --- a/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java +++ b/llap-client/src/java/org/apache/hadoop/hive/llap/registry/impl/LlapZookeeperRegistryImpl.java @@ -168,6 +168,16 @@ public String register() throws IOException { String uniqueId = UNIQUE_ID.toString(); long znodeCreationTimeout = 120; + /* When no LLAP executors are running, getInstances() / llapRegistryService.getInstances() fails with an + InvalidACL exception when Hive tries to evict the cache on DROP DATABASE/DROP TABLE. + + Because ZooKeeper's ContainerManager cleans up znodes periodically, the entire path will be deleted sooner or later + once the query-coordinator ZooKeeper client session is terminated. This also results in an InvalidACL exception. + + A PersistentNode created on the server is preserved across restarts of the query executor and ensures that the + proactive eviction calls issued by DROP DATABASE/DROP TABLE go through + successfully. 
*/ + ensurePersistentNodePath(daemonZkRecord); initializeWithoutRegisteringInternal(); // Create a znode under the rootNamespace parent for this instance of the server try { diff --git a/llap-client/src/java/org/apache/hadoop/hive/registry/impl/ZkRegistryBase.java b/llap-client/src/java/org/apache/hadoop/hive/registry/impl/ZkRegistryBase.java index 9da200f2e928..f6a52cd3b2d0 100644 --- a/llap-client/src/java/org/apache/hadoop/hive/registry/impl/ZkRegistryBase.java +++ b/llap-client/src/java/org/apache/hadoop/hive/registry/impl/ZkRegistryBase.java @@ -29,6 +29,8 @@ import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.lang3.StringUtils; import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.api.ACLProvider; import org.apache.curator.framework.imps.CuratorFrameworkState; @@ -55,6 +57,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.zookeeper.CreateMode; +import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.InvalidACLException; import org.apache.zookeeper.KeeperException.NodeExistsException; import org.apache.zookeeper.ZooDefs; @@ -114,6 +117,8 @@ public abstract class ZkRegistryBase { private PersistentNode znode; private String znodePath; // unique identity for this instance + final String namespace; + private PathChildrenCache instancesCache; // Created on demand. /** Local hostname. */ @@ -160,7 +165,7 @@ public ZkRegistryBase(String instanceName, Configuration conf, String rootNs, St this.stateChangeListeners = new HashSet<>(); this.pathToInstanceCache = new ConcurrentHashMap<>(); this.nodeToInstanceCache = new ConcurrentHashMap<>(); - final String namespace = getRootNamespace(conf, rootNs, nsPrefix); + this.namespace = getRootNamespace(conf, rootNs, nsPrefix); ACLProvider aclProvider; // get acl provider for most outer path that is non-null if (userPathPrefix == null) { @@ -353,6 +358,30 @@ protected final void updateServiceRecord( } } + @VisibleForTesting + public String getPersistentNodePath() { + return "/" + PATH_JOINER.join(namespace, StringUtils.substringBetween(workersPath, "/", "/"), "pnode0"); + } + + protected void ensurePersistentNodePath(ServiceRecord srv) throws IOException { + String pNodePath = getPersistentNodePath(); + try { + LOG.info("Check if persistent node path {} exists, create if not", pNodePath); + if (zooKeeperClient.checkExists().forPath(pNodePath) == null) { + zooKeeperClient.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT) + .forPath(pNodePath, encoder.toBytes(srv)); + LOG.info("Created persistent path at: {}", pNodePath); + } + } catch (Exception e) { + // throw exception if it is other than NODEEXISTS. 
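// (NODEEXISTS means the shared persistent path was already created, e.g. by another daemon instance, and is
// ignored as success; any other ZooKeeper failure is wrapped in an IOException and rethrown, as the code
// below shows.)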
+ if (!(e instanceof KeeperException) || ((KeeperException) e).code() != KeeperException.Code.NODEEXISTS) { + LOG.error("Unable to create a persistent znode for this server instance", e); + throw new IOException(e); + } else { + LOG.debug("Ignoring KeeperException while ensuring path as the parent node {} already exists.", pNodePath); + } + } + } final protected void initializeWithoutRegisteringInternal() throws IOException { // Create a znode under the rootNamespace parent for this instance of the server diff --git a/llap-client/src/test/org/apache/hadoop/hive/llap/registry/impl/TestLlapZookeeperRegistryImpl.java b/llap-client/src/test/org/apache/hadoop/hive/llap/registry/impl/TestLlapZookeeperRegistryImpl.java index 50799f2f5cab..7b227d4356bc 100644 --- a/llap-client/src/test/org/apache/hadoop/hive/llap/registry/impl/TestLlapZookeeperRegistryImpl.java +++ b/llap-client/src/test/org/apache/hadoop/hive/llap/registry/impl/TestLlapZookeeperRegistryImpl.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.llap.registry.LlapServiceInstance; import org.apache.hadoop.hive.registry.ServiceInstanceSet; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -47,6 +48,7 @@ public class TestLlapZookeeperRegistryImpl { private CuratorFramework curatorFramework; private TestingServer server; + private final static String NAMESPACE_PREFIX = "llap-"; @Before public void setUp() throws Exception { @@ -124,6 +126,16 @@ public void testUpdate() throws Exception { attributes.get(LlapRegistryService.LLAP_DAEMON_NUM_ENABLED_EXECUTORS)); } + @Test + public void testPersistentNodePath() { + String llapRootNameSpace = "/" + LlapZookeeperRegistryImpl.getRootNamespace(hiveConf, + HiveConf.getVar(hiveConf, HiveConf.ConfVars.LLAP_ZK_REGISTRY_NAMESPACE), NAMESPACE_PREFIX); + String persistentNodeName = "/pnode0"; + + Assert.assertEquals(llapRootNameSpace + "/user-" + System.getProperty("user.name") + persistentNodeName, + registry.getPersistentNodePath()); + } + static void trySetMock(Object o, String field, T value) { try { Field fieldToChange = Arrays.stream(FieldUtils.getAllFields(o.getClass())) diff --git a/llap-common/pom.xml b/llap-common/pom.xml index 38fc22d6bd23..db5af98d5939 100644 --- a/llap-common/pom.xml +++ b/llap-common/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-llap-common diff --git a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java index b1526cc0771c..b50448053bfa 100644 --- a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java +++ b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/daemon/rpc/LlapDaemonProtocolProtos.java @@ -5,14 +5,8 @@ public final class LlapDaemonProtocolProtos { private LlapDaemonProtocolProtos() {} - public static void registerAllExtensions( - com.google.protobuf.ExtensionRegistryLite registry) { - } - public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { - registerAllExtensions( - (com.google.protobuf.ExtensionRegistryLite) registry); } /** * Protobuf enum {@code SourceStateProto} @@ -22,11 +16,11 @@ public enum SourceStateProto /** * S_SUCCEEDED = 1; */ - S_SUCCEEDED(1), + S_SUCCEEDED(0, 1), /** * S_RUNNING = 2; */ - S_RUNNING(2), + S_RUNNING(1, 2), ; /** @@ -39,25 +33,9 @@ public enum 
SourceStateProto public static final int S_RUNNING_VALUE = 2; - public final int getNumber() { - return value; - } + public final int getNumber() { return value; } - /** - * @param value The numeric wire value of the corresponding enum entry. - * @return The enum associated with the given numeric wire value. - * @deprecated Use {@link #forNumber(int)} instead. - */ - @java.lang.Deprecated public static SourceStateProto valueOf(int value) { - return forNumber(value); - } - - /** - * @param value The numeric wire value of the corresponding enum entry. - * @return The enum associated with the given numeric wire value. - */ - public static SourceStateProto forNumber(int value) { switch (value) { case 1: return S_SUCCEEDED; case 2: return S_RUNNING; @@ -69,17 +47,17 @@ public static SourceStateProto forNumber(int value) { internalGetValueMap() { return internalValueMap; } - private static final com.google.protobuf.Internal.EnumLiteMap< - SourceStateProto> internalValueMap = + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = new com.google.protobuf.Internal.EnumLiteMap() { public SourceStateProto findValueByNumber(int number) { - return SourceStateProto.forNumber(number); + return SourceStateProto.valueOf(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { - return getDescriptor().getValues().get(ordinal()); + return getDescriptor().getValues().get(index); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { @@ -101,9 +79,11 @@ public static SourceStateProto valueOf( return VALUES[desc.getIndex()]; } + private final int index; private final int value; - private SourceStateProto(int value) { + private SourceStateProto(int index, int value) { + this.index = index; this.value = value; } @@ -118,15 +98,15 @@ public enum SubmissionStateProto /** * ACCEPTED = 1; */ - ACCEPTED(1), + ACCEPTED(0, 1), /** * REJECTED = 2; */ - REJECTED(2), + REJECTED(1, 2), /** * EVICTED_OTHER = 3; */ - EVICTED_OTHER(3), + EVICTED_OTHER(2, 3), ; /** @@ -143,25 +123,9 @@ public enum SubmissionStateProto public static final int EVICTED_OTHER_VALUE = 3; - public final int getNumber() { - return value; - } + public final int getNumber() { return value; } - /** - * @param value The numeric wire value of the corresponding enum entry. - * @return The enum associated with the given numeric wire value. - * @deprecated Use {@link #forNumber(int)} instead. - */ - @java.lang.Deprecated public static SubmissionStateProto valueOf(int value) { - return forNumber(value); - } - - /** - * @param value The numeric wire value of the corresponding enum entry. - * @return The enum associated with the given numeric wire value. 
- */ - public static SubmissionStateProto forNumber(int value) { switch (value) { case 1: return ACCEPTED; case 2: return REJECTED; @@ -174,17 +138,17 @@ public static SubmissionStateProto forNumber(int value) { internalGetValueMap() { return internalValueMap; } - private static final com.google.protobuf.Internal.EnumLiteMap< - SubmissionStateProto> internalValueMap = + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = new com.google.protobuf.Internal.EnumLiteMap() { public SubmissionStateProto findValueByNumber(int number) { - return SubmissionStateProto.forNumber(number); + return SubmissionStateProto.valueOf(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { - return getDescriptor().getValues().get(ordinal()); + return getDescriptor().getValues().get(index); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { @@ -206,38 +170,37 @@ public static SubmissionStateProto valueOf( return VALUES[desc.getIndex()]; } + private final int index; private final int value; - private SubmissionStateProto(int value) { + private SubmissionStateProto(int index, int value) { + this.index = index; this.value = value; } // @@protoc_insertion_point(enum_scope:SubmissionStateProto) } - public interface UserPayloadProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:UserPayloadProto) - com.google.protobuf.MessageOrBuilder { + public interface UserPayloadProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional bytes user_payload = 1; /** * optional bytes user_payload = 1; - * @return Whether the userPayload field is set. */ boolean hasUserPayload(); /** * optional bytes user_payload = 1; - * @return The userPayload. */ com.google.protobuf.ByteString getUserPayload(); + // optional int32 version = 2; /** * optional int32 version = 2; - * @return Whether the version field is set. */ boolean hasVersion(); /** * optional int32 version = 2; - * @return The version. */ int getVersion(); } @@ -245,38 +208,35 @@ public interface UserPayloadProtoOrBuilder extends * Protobuf type {@code UserPayloadProto} */ public static final class UserPayloadProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:UserPayloadProto) - UserPayloadProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements UserPayloadProtoOrBuilder { // Use UserPayloadProto.newBuilder() to construct. 
- private UserPayloadProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private UserPayloadProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private UserPayloadProto() { - userPayload_ = com.google.protobuf.ByteString.EMPTY; + private UserPayloadProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final UserPayloadProto defaultInstance; + public static UserPayloadProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new UserPayloadProto(); + public UserPayloadProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private UserPayloadProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -288,6 +248,13 @@ private UserPayloadProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { bitField0_ |= 0x00000001; userPayload_ = input.readBytes(); @@ -298,22 +265,13 @@ private UserPayloadProto( version_ = input.readInt32(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -324,95 +282,112 @@ private UserPayloadProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UserPayloadProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UserPayloadProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public UserPayloadProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new UserPayloadProto(input, extensionRegistry); + } + }; + 
+ @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional bytes user_payload = 1; public static final int USER_PAYLOAD_FIELD_NUMBER = 1; private com.google.protobuf.ByteString userPayload_; /** * optional bytes user_payload = 1; - * @return Whether the userPayload field is set. */ - @java.lang.Override public boolean hasUserPayload() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional bytes user_payload = 1; - * @return The userPayload. */ - @java.lang.Override public com.google.protobuf.ByteString getUserPayload() { return userPayload_; } + // optional int32 version = 2; public static final int VERSION_FIELD_NUMBER = 2; private int version_; /** * optional int32 version = 2; - * @return Whether the version field is set. */ - @java.lang.Override public boolean hasVersion() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional int32 version = 2; - * @return The version. */ - @java.lang.Override public int getVersion() { return version_; } + private void initFields() { + userPayload_ = com.google.protobuf.ByteString.EMPTY; + version_ = 0; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, userPayload_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeInt32(2, version_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, userPayload_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(2, version_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -423,27 +398,30 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto) obj; - if (hasUserPayload() != other.hasUserPayload()) return false; + boolean result = true; + result = result && (hasUserPayload() == other.hasUserPayload()); if (hasUserPayload()) { - if (!getUserPayload() - .equals(other.getUserPayload())) return 
false; + result = result && getUserPayload() + .equals(other.getUserPayload()); } - if (hasVersion() != other.hasVersion()) return false; + result = result && (hasVersion() == other.hasVersion()); if (hasVersion()) { - if (getVersion() - != other.getVersion()) return false; + result = result && (getVersion() + == other.getVersion()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasUserPayload()) { hash = (37 * hash) + USER_PAYLOAD_FIELD_NUMBER; hash = (53 * hash) + getUserPayload().hashCode(); @@ -452,22 +430,11 @@ public int hashCode() { hash = (37 * hash) + VERSION_FIELD_NUMBER; hash = (53 * hash) + getVersion(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -491,59 +458,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Us } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseFrom( 
com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -551,16 +505,14 @@ protected Builder newBuilderForType( * Protobuf type {@code UserPayloadProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:UserPayloadProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UserPayloadProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UserPayloadProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -573,16 +525,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); userPayload_ = com.google.protobuf.ByteString.EMPTY; @@ -592,18 +546,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UserPayloadProto_descriptor; } - 
@java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto result = buildPartial(); if (!result.isInitialized()) { @@ -612,57 +567,23 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPaylo return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.userPayload_ = userPayload_; - if (((from_bitField0_ & 0x00000002) != 0)) { - result.version_ = version_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } + result.version_ = version_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto)other); @@ -680,17 +601,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasVersion()) { setVersion(other.getVersion()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -700,7 +618,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if 
(parsedMessage != null) { mergeFrom(parsedMessage); @@ -710,27 +628,22 @@ public Builder mergeFrom( } private int bitField0_; + // optional bytes user_payload = 1; private com.google.protobuf.ByteString userPayload_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes user_payload = 1; - * @return Whether the userPayload field is set. */ - @java.lang.Override public boolean hasUserPayload() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional bytes user_payload = 1; - * @return The userPayload. */ - @java.lang.Override public com.google.protobuf.ByteString getUserPayload() { return userPayload_; } /** * optional bytes user_payload = 1; - * @param value The userPayload to set. - * @return This builder for chaining. */ public Builder setUserPayload(com.google.protobuf.ByteString value) { if (value == null) { @@ -743,7 +656,6 @@ public Builder setUserPayload(com.google.protobuf.ByteString value) { } /** * optional bytes user_payload = 1; - * @return This builder for chaining. */ public Builder clearUserPayload() { bitField0_ = (bitField0_ & ~0x00000001); @@ -752,27 +664,22 @@ public Builder clearUserPayload() { return this; } + // optional int32 version = 2; private int version_ ; /** * optional int32 version = 2; - * @return Whether the version field is set. */ - @java.lang.Override public boolean hasVersion() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional int32 version = 2; - * @return The version. */ - @java.lang.Override public int getVersion() { return version_; } /** * optional int32 version = 2; - * @param value The version to set. - * @return This builder for chaining. */ public Builder setVersion(int value) { bitField0_ |= 0x00000002; @@ -782,7 +689,6 @@ public Builder setVersion(int value) { } /** * optional int32 version = 2; - * @return This builder for chaining. 
*/ public Builder clearVersion() { bitField0_ = (bitField0_ & ~0x00000002); @@ -790,88 +696,43 @@ public Builder clearVersion() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:UserPayloadProto) } - // @@protoc_insertion_point(class_scope:UserPayloadProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public UserPayloadProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new UserPayloadProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new UserPayloadProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:UserPayloadProto) } - public interface EntityDescriptorProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:EntityDescriptorProto) - com.google.protobuf.MessageOrBuilder { + public interface EntityDescriptorProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional string class_name = 1; /** * optional string class_name = 1; - * @return Whether the className field is set. */ boolean hasClassName(); /** * optional string class_name = 1; - * @return The className. */ java.lang.String getClassName(); /** * optional string class_name = 1; - * @return The bytes for className. */ com.google.protobuf.ByteString getClassNameBytes(); + // optional .UserPayloadProto user_payload = 2; /** * optional .UserPayloadProto user_payload = 2; - * @return Whether the userPayload field is set. */ boolean hasUserPayload(); /** * optional .UserPayloadProto user_payload = 2; - * @return The userPayload. */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto getUserPayload(); /** @@ -879,14 +740,13 @@ public interface EntityDescriptorProtoOrBuilder extends */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProtoOrBuilder getUserPayloadOrBuilder(); + // optional bytes history_text = 3; /** * optional bytes history_text = 3; - * @return Whether the historyText field is set. */ boolean hasHistoryText(); /** * optional bytes history_text = 3; - * @return The historyText. 
*/ com.google.protobuf.ByteString getHistoryText(); } @@ -894,39 +754,35 @@ public interface EntityDescriptorProtoOrBuilder extends * Protobuf type {@code EntityDescriptorProto} */ public static final class EntityDescriptorProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:EntityDescriptorProto) - EntityDescriptorProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements EntityDescriptorProtoOrBuilder { // Use EntityDescriptorProto.newBuilder() to construct. - private EntityDescriptorProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private EntityDescriptorProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private EntityDescriptorProto() { - className_ = ""; - historyText_ = com.google.protobuf.ByteString.EMPTY; + private EntityDescriptorProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final EntityDescriptorProto defaultInstance; + public static EntityDescriptorProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new EntityDescriptorProto(); + public EntityDescriptorProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private EntityDescriptorProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -938,15 +794,21 @@ private EntityDescriptorProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - className_ = bs; + className_ = input.readBytes(); break; } case 18: { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.Builder subBuilder = null; - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = userPayload_.toBuilder(); } userPayload_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.PARSER, extensionRegistry); @@ -962,22 +824,13 @@ private EntityDescriptorProto( historyText_ = input.readBytes(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { 
this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -988,30 +841,41 @@ private EntityDescriptorProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EntityDescriptorProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EntityDescriptorProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public EntityDescriptorProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new EntityDescriptorProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional string class_name = 1; public static final int CLASS_NAME_FIELD_NUMBER = 1; - private volatile java.lang.Object className_; + private java.lang.Object className_; /** * optional string class_name = 1; - * @return Whether the className field is set. */ - @java.lang.Override public boolean hasClassName() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string class_name = 1; - * @return The className. */ - @java.lang.Override public java.lang.String getClassName() { java.lang.Object ref = className_; if (ref instanceof java.lang.String) { @@ -1028,9 +892,7 @@ public java.lang.String getClassName() { } /** * optional string class_name = 1; - * @return The bytes for className. */ - @java.lang.Override public com.google.protobuf.ByteString getClassNameBytes() { java.lang.Object ref = className_; @@ -1045,99 +907,103 @@ public java.lang.String getClassName() { } } + // optional .UserPayloadProto user_payload = 2; public static final int USER_PAYLOAD_FIELD_NUMBER = 2; private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto userPayload_; /** * optional .UserPayloadProto user_payload = 2; - * @return Whether the userPayload field is set. */ - @java.lang.Override public boolean hasUserPayload() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional .UserPayloadProto user_payload = 2; - * @return The userPayload. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto getUserPayload() { - return userPayload_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance() : userPayload_; + return userPayload_; } /** * optional .UserPayloadProto user_payload = 2; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProtoOrBuilder getUserPayloadOrBuilder() { - return userPayload_ == null ? 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance() : userPayload_; + return userPayload_; } + // optional bytes history_text = 3; public static final int HISTORY_TEXT_FIELD_NUMBER = 3; private com.google.protobuf.ByteString historyText_; /** * optional bytes history_text = 3; - * @return Whether the historyText field is set. */ - @java.lang.Override public boolean hasHistoryText() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional bytes history_text = 3; - * @return The historyText. */ - @java.lang.Override public com.google.protobuf.ByteString getHistoryText() { return historyText_; } + private void initFields() { + className_ = ""; + userPayload_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance(); + historyText_ = com.google.protobuf.ByteString.EMPTY; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, className_); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getClassNameBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { - output.writeMessage(2, getUserPayload()); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, userPayload_); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, historyText_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, className_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getClassNameBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, getUserPayload()); + .computeMessageSize(2, userPayload_); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(3, historyText_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -1148,32 +1014,35 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto other = 
(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto) obj; - if (hasClassName() != other.hasClassName()) return false; + boolean result = true; + result = result && (hasClassName() == other.hasClassName()); if (hasClassName()) { - if (!getClassName() - .equals(other.getClassName())) return false; + result = result && getClassName() + .equals(other.getClassName()); } - if (hasUserPayload() != other.hasUserPayload()) return false; + result = result && (hasUserPayload() == other.hasUserPayload()); if (hasUserPayload()) { - if (!getUserPayload() - .equals(other.getUserPayload())) return false; + result = result && getUserPayload() + .equals(other.getUserPayload()); } - if (hasHistoryText() != other.hasHistoryText()) return false; + result = result && (hasHistoryText() == other.hasHistoryText()); if (hasHistoryText()) { - if (!getHistoryText() - .equals(other.getHistoryText())) return false; + result = result && getHistoryText() + .equals(other.getHistoryText()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasClassName()) { hash = (37 * hash) + CLASS_NAME_FIELD_NUMBER; hash = (53 * hash) + getClassName().hashCode(); @@ -1186,22 +1055,11 @@ public int hashCode() { hash = (37 * hash) + HISTORY_TEXT_FIELD_NUMBER; hash = (53 * hash) + getHistoryText().hashCode(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -1225,59 +1083,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.En } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseDelimitedFrom(java.io.InputStream input) 
throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -1285,16 +1130,14 @@ protected Builder newBuilderForType( * Protobuf type {@code EntityDescriptorProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:EntityDescriptorProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EntityDescriptorProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EntityDescriptorProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -1307,23 +1150,25 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getUserPayloadFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); className_ = ""; bitField0_ = (bitField0_ & ~0x00000001); if (userPayloadBuilder_ == null) { - userPayload_ = null; + userPayload_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance(); } else { userPayloadBuilder_.clear(); } @@ -1333,18 +1178,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EntityDescriptorProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto result = buildPartial(); if (!result.isInitialized()) { @@ -1353,24 +1199,23 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDes return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto buildPartial() { 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.className_ = className_; - if (((from_bitField0_ & 0x00000002) != 0)) { - if (userPayloadBuilder_ == null) { - result.userPayload_ = userPayload_; - } else { - result.userPayload_ = userPayloadBuilder_.build(); - } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - if (((from_bitField0_ & 0x00000004) != 0)) { + if (userPayloadBuilder_ == null) { + result.userPayload_ = userPayload_; + } else { + result.userPayload_ = userPayloadBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.historyText_ = historyText_; @@ -1379,39 +1224,6 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDes return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto)other); @@ -1434,17 +1246,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasHistoryText()) { setHistoryText(other.getHistoryText()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -1454,7 +1263,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -1464,27 +1273,23 @@ public Builder mergeFrom( } private int bitField0_; + // optional string class_name = 1; private java.lang.Object className_ = ""; /** * optional string class_name = 1; - * @return Whether 
the className field is set. */ public boolean hasClassName() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string class_name = 1; - * @return The className. */ public java.lang.String getClassName() { java.lang.Object ref = className_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - className_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + className_ = s; return s; } else { return (java.lang.String) ref; @@ -1492,7 +1297,6 @@ public java.lang.String getClassName() { } /** * optional string class_name = 1; - * @return The bytes for className. */ public com.google.protobuf.ByteString getClassNameBytes() { @@ -1509,8 +1313,6 @@ public java.lang.String getClassName() { } /** * optional string class_name = 1; - * @param value The className to set. - * @return This builder for chaining. */ public Builder setClassName( java.lang.String value) { @@ -1524,7 +1326,6 @@ public Builder setClassName( } /** * optional string class_name = 1; - * @return This builder for chaining. */ public Builder clearClassName() { bitField0_ = (bitField0_ & ~0x00000001); @@ -1534,8 +1335,6 @@ public Builder clearClassName() { } /** * optional string class_name = 1; - * @param value The bytes for className to set. - * @return This builder for chaining. */ public Builder setClassNameBytes( com.google.protobuf.ByteString value) { @@ -1548,23 +1347,22 @@ public Builder setClassNameBytes( return this; } - private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto userPayload_; - private com.google.protobuf.SingleFieldBuilderV3< + // optional .UserPayloadProto user_payload = 2; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto userPayload_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProtoOrBuilder> userPayloadBuilder_; /** * optional .UserPayloadProto user_payload = 2; - * @return Whether the userPayload field is set. */ public boolean hasUserPayload() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional .UserPayloadProto user_payload = 2; - * @return The userPayload. */ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto getUserPayload() { if (userPayloadBuilder_ == null) { - return userPayload_ == null ? 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance() : userPayload_; + return userPayload_; } else { return userPayloadBuilder_.getMessage(); } @@ -1604,8 +1402,7 @@ public Builder setUserPayload( */ public Builder mergeUserPayload(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto value) { if (userPayloadBuilder_ == null) { - if (((bitField0_ & 0x00000002) != 0) && - userPayload_ != null && + if (((bitField0_ & 0x00000002) == 0x00000002) && userPayload_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance()) { userPayload_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.newBuilder(userPayload_).mergeFrom(value).buildPartial(); @@ -1624,7 +1421,7 @@ public Builder mergeUserPayload(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemo */ public Builder clearUserPayload() { if (userPayloadBuilder_ == null) { - userPayload_ = null; + userPayload_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance(); onChanged(); } else { userPayloadBuilder_.clear(); @@ -1647,20 +1444,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPaylo if (userPayloadBuilder_ != null) { return userPayloadBuilder_.getMessageOrBuilder(); } else { - return userPayload_ == null ? - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.getDefaultInstance() : userPayload_; + return userPayload_; } } /** * optional .UserPayloadProto user_payload = 2; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProtoOrBuilder> getUserPayloadFieldBuilder() { if (userPayloadBuilder_ == null) { - userPayloadBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + userPayloadBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPayloadProtoOrBuilder>( - getUserPayload(), + userPayload_, getParentForChildren(), isClean()); userPayload_ = null; @@ -1668,27 +1464,22 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UserPaylo return userPayloadBuilder_; } + // optional bytes history_text = 3; private com.google.protobuf.ByteString historyText_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes history_text = 3; - * @return Whether the historyText field is set. */ - @java.lang.Override public boolean hasHistoryText() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional bytes history_text = 3; - * @return The historyText. */ - @java.lang.Override public com.google.protobuf.ByteString getHistoryText() { return historyText_; } /** * optional bytes history_text = 3; - * @param value The historyText to set. - * @return This builder for chaining. 
*/ public Builder setHistoryText(com.google.protobuf.ByteString value) { if (value == null) { @@ -1701,7 +1492,6 @@ public Builder setHistoryText(com.google.protobuf.ByteString value) { } /** * optional bytes history_text = 3; - * @return This builder for chaining. */ public Builder clearHistoryText() { bitField0_ = (bitField0_ & ~0x00000004); @@ -1709,88 +1499,43 @@ public Builder clearHistoryText() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:EntityDescriptorProto) } - // @@protoc_insertion_point(class_scope:EntityDescriptorProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public EntityDescriptorProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new EntityDescriptorProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new EntityDescriptorProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:EntityDescriptorProto) } - public interface IOSpecProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:IOSpecProto) - com.google.protobuf.MessageOrBuilder { + public interface IOSpecProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional string connected_vertex_name = 1; /** * optional string connected_vertex_name = 1; - * @return Whether the connectedVertexName field is set. */ boolean hasConnectedVertexName(); /** * optional string connected_vertex_name = 1; - * @return The connectedVertexName. */ java.lang.String getConnectedVertexName(); /** * optional string connected_vertex_name = 1; - * @return The bytes for connectedVertexName. */ com.google.protobuf.ByteString getConnectedVertexNameBytes(); + // optional .EntityDescriptorProto io_descriptor = 2; /** * optional .EntityDescriptorProto io_descriptor = 2; - * @return Whether the ioDescriptor field is set. */ boolean hasIoDescriptor(); /** * optional .EntityDescriptorProto io_descriptor = 2; - * @return The ioDescriptor. 
*/ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getIoDescriptor(); /** @@ -1798,14 +1543,13 @@ public interface IOSpecProtoOrBuilder extends */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getIoDescriptorOrBuilder(); + // optional int32 physical_edge_count = 3; /** * optional int32 physical_edge_count = 3; - * @return Whether the physicalEdgeCount field is set. */ boolean hasPhysicalEdgeCount(); /** * optional int32 physical_edge_count = 3; - * @return The physicalEdgeCount. */ int getPhysicalEdgeCount(); } @@ -1813,38 +1557,35 @@ public interface IOSpecProtoOrBuilder extends * Protobuf type {@code IOSpecProto} */ public static final class IOSpecProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:IOSpecProto) - IOSpecProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements IOSpecProtoOrBuilder { // Use IOSpecProto.newBuilder() to construct. - private IOSpecProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private IOSpecProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private IOSpecProto() { - connectedVertexName_ = ""; + private IOSpecProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final IOSpecProto defaultInstance; + public static IOSpecProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new IOSpecProto(); + public IOSpecProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private IOSpecProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -1856,15 +1597,21 @@ private IOSpecProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - connectedVertexName_ = bs; + connectedVertexName_ = input.readBytes(); break; } case 18: { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder subBuilder = null; - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = ioDescriptor_.toBuilder(); } ioDescriptor_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.PARSER, extensionRegistry); @@ -1880,22 +1627,13 @@ private IOSpecProto( physicalEdgeCount_ = input.readInt32(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { 
throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -1906,30 +1644,41 @@ private IOSpecProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_IOSpecProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_IOSpecProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public IOSpecProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new IOSpecProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional string connected_vertex_name = 1; public static final int CONNECTED_VERTEX_NAME_FIELD_NUMBER = 1; - private volatile java.lang.Object connectedVertexName_; + private java.lang.Object connectedVertexName_; /** * optional string connected_vertex_name = 1; - * @return Whether the connectedVertexName field is set. */ - @java.lang.Override public boolean hasConnectedVertexName() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string connected_vertex_name = 1; - * @return The connectedVertexName. */ - @java.lang.Override public java.lang.String getConnectedVertexName() { java.lang.Object ref = connectedVertexName_; if (ref instanceof java.lang.String) { @@ -1946,9 +1695,7 @@ public java.lang.String getConnectedVertexName() { } /** * optional string connected_vertex_name = 1; - * @return The bytes for connectedVertexName. */ - @java.lang.Override public com.google.protobuf.ByteString getConnectedVertexNameBytes() { java.lang.Object ref = connectedVertexName_; @@ -1963,99 +1710,103 @@ public java.lang.String getConnectedVertexName() { } } + // optional .EntityDescriptorProto io_descriptor = 2; public static final int IO_DESCRIPTOR_FIELD_NUMBER = 2; private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto ioDescriptor_; /** * optional .EntityDescriptorProto io_descriptor = 2; - * @return Whether the ioDescriptor field is set. */ - @java.lang.Override public boolean hasIoDescriptor() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional .EntityDescriptorProto io_descriptor = 2; - * @return The ioDescriptor. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getIoDescriptor() { - return ioDescriptor_ == null ? 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance() : ioDescriptor_; + return ioDescriptor_; } /** * optional .EntityDescriptorProto io_descriptor = 2; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getIoDescriptorOrBuilder() { - return ioDescriptor_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance() : ioDescriptor_; + return ioDescriptor_; } + // optional int32 physical_edge_count = 3; public static final int PHYSICAL_EDGE_COUNT_FIELD_NUMBER = 3; private int physicalEdgeCount_; /** * optional int32 physical_edge_count = 3; - * @return Whether the physicalEdgeCount field is set. */ - @java.lang.Override public boolean hasPhysicalEdgeCount() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional int32 physical_edge_count = 3; - * @return The physicalEdgeCount. */ - @java.lang.Override public int getPhysicalEdgeCount() { return physicalEdgeCount_; } + private void initFields() { + connectedVertexName_ = ""; + ioDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance(); + physicalEdgeCount_ = 0; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, connectedVertexName_); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getConnectedVertexNameBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { - output.writeMessage(2, getIoDescriptor()); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, ioDescriptor_); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeInt32(3, physicalEdgeCount_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, connectedVertexName_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getConnectedVertexNameBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, getIoDescriptor()); + .computeMessageSize(2, ioDescriptor_); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(3, physicalEdgeCount_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + 
@java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -2066,32 +1817,35 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto) obj; - if (hasConnectedVertexName() != other.hasConnectedVertexName()) return false; + boolean result = true; + result = result && (hasConnectedVertexName() == other.hasConnectedVertexName()); if (hasConnectedVertexName()) { - if (!getConnectedVertexName() - .equals(other.getConnectedVertexName())) return false; + result = result && getConnectedVertexName() + .equals(other.getConnectedVertexName()); } - if (hasIoDescriptor() != other.hasIoDescriptor()) return false; + result = result && (hasIoDescriptor() == other.hasIoDescriptor()); if (hasIoDescriptor()) { - if (!getIoDescriptor() - .equals(other.getIoDescriptor())) return false; + result = result && getIoDescriptor() + .equals(other.getIoDescriptor()); } - if (hasPhysicalEdgeCount() != other.hasPhysicalEdgeCount()) return false; + result = result && (hasPhysicalEdgeCount() == other.hasPhysicalEdgeCount()); if (hasPhysicalEdgeCount()) { - if (getPhysicalEdgeCount() - != other.getPhysicalEdgeCount()) return false; + result = result && (getPhysicalEdgeCount() + == other.getPhysicalEdgeCount()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasConnectedVertexName()) { hash = (37 * hash) + CONNECTED_VERTEX_NAME_FIELD_NUMBER; hash = (53 * hash) + getConnectedVertexName().hashCode(); @@ -2104,22 +1858,11 @@ public int hashCode() { hash = (37 * hash) + PHYSICAL_EDGE_COUNT_FIELD_NUMBER; hash = (53 * hash) + getPhysicalEdgeCount(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -2143,59 +1886,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IO } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -2203,16 +1933,14 @@ protected Builder newBuilderForType( * Protobuf type {@code IOSpecProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:IOSpecProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_IOSpecProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_IOSpecProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -2225,23 +1953,25 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getIoDescriptorFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); connectedVertexName_ = ""; bitField0_ = (bitField0_ & ~0x00000001); if (ioDescriptorBuilder_ == null) { - ioDescriptor_ = null; + ioDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance(); } else { ioDescriptorBuilder_.clear(); } @@ -2251,18 +1981,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_IOSpecProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto result = buildPartial(); if (!result.isInitialized()) { @@ -2271,65 +2002,31 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecPro return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto result = new 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.connectedVertexName_ = connectedVertexName_; - if (((from_bitField0_ & 0x00000002) != 0)) { - if (ioDescriptorBuilder_ == null) { - result.ioDescriptor_ = ioDescriptor_; - } else { - result.ioDescriptor_ = ioDescriptorBuilder_.build(); - } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - if (((from_bitField0_ & 0x00000004) != 0)) { - result.physicalEdgeCount_ = physicalEdgeCount_; + if (ioDescriptorBuilder_ == null) { + result.ioDescriptor_ = ioDescriptor_; + } else { + result.ioDescriptor_ = ioDescriptorBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } + result.physicalEdgeCount_ = physicalEdgeCount_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto)other); @@ -2352,17 +2049,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasPhysicalEdgeCount()) { setPhysicalEdgeCount(other.getPhysicalEdgeCount()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -2372,7 +2066,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -2382,27 +2076,23 @@ public Builder mergeFrom( } private int bitField0_; + // optional string connected_vertex_name = 1; private java.lang.Object connectedVertexName_ = ""; /** * optional string connected_vertex_name = 1; - * @return Whether the connectedVertexName field is set. 
*/ public boolean hasConnectedVertexName() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string connected_vertex_name = 1; - * @return The connectedVertexName. */ public java.lang.String getConnectedVertexName() { java.lang.Object ref = connectedVertexName_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - connectedVertexName_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + connectedVertexName_ = s; return s; } else { return (java.lang.String) ref; @@ -2410,7 +2100,6 @@ public java.lang.String getConnectedVertexName() { } /** * optional string connected_vertex_name = 1; - * @return The bytes for connectedVertexName. */ public com.google.protobuf.ByteString getConnectedVertexNameBytes() { @@ -2427,8 +2116,6 @@ public java.lang.String getConnectedVertexName() { } /** * optional string connected_vertex_name = 1; - * @param value The connectedVertexName to set. - * @return This builder for chaining. */ public Builder setConnectedVertexName( java.lang.String value) { @@ -2442,7 +2129,6 @@ public Builder setConnectedVertexName( } /** * optional string connected_vertex_name = 1; - * @return This builder for chaining. */ public Builder clearConnectedVertexName() { bitField0_ = (bitField0_ & ~0x00000001); @@ -2452,8 +2138,6 @@ public Builder clearConnectedVertexName() { } /** * optional string connected_vertex_name = 1; - * @param value The bytes for connectedVertexName to set. - * @return This builder for chaining. */ public Builder setConnectedVertexNameBytes( com.google.protobuf.ByteString value) { @@ -2466,23 +2150,22 @@ public Builder setConnectedVertexNameBytes( return this; } - private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto ioDescriptor_; - private com.google.protobuf.SingleFieldBuilderV3< + // optional .EntityDescriptorProto io_descriptor = 2; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto ioDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder> ioDescriptorBuilder_; /** * optional .EntityDescriptorProto io_descriptor = 2; - * @return Whether the ioDescriptor field is set. */ public boolean hasIoDescriptor() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional .EntityDescriptorProto io_descriptor = 2; - * @return The ioDescriptor. */ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getIoDescriptor() { if (ioDescriptorBuilder_ == null) { - return ioDescriptor_ == null ? 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance() : ioDescriptor_; + return ioDescriptor_; } else { return ioDescriptorBuilder_.getMessage(); } @@ -2522,8 +2205,7 @@ public Builder setIoDescriptor( */ public Builder mergeIoDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto value) { if (ioDescriptorBuilder_ == null) { - if (((bitField0_ & 0x00000002) != 0) && - ioDescriptor_ != null && + if (((bitField0_ & 0x00000002) == 0x00000002) && ioDescriptor_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance()) { ioDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.newBuilder(ioDescriptor_).mergeFrom(value).buildPartial(); @@ -2542,7 +2224,7 @@ public Builder mergeIoDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaem */ public Builder clearIoDescriptor() { if (ioDescriptorBuilder_ == null) { - ioDescriptor_ = null; + ioDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance(); onChanged(); } else { ioDescriptorBuilder_.clear(); @@ -2565,20 +2247,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDes if (ioDescriptorBuilder_ != null) { return ioDescriptorBuilder_.getMessageOrBuilder(); } else { - return ioDescriptor_ == null ? - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance() : ioDescriptor_; + return ioDescriptor_; } } /** * optional .EntityDescriptorProto io_descriptor = 2; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder> getIoDescriptorFieldBuilder() { if (ioDescriptorBuilder_ == null) { - ioDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + ioDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder>( - getIoDescriptor(), + ioDescriptor_, getParentForChildren(), isClean()); ioDescriptor_ = null; @@ -2586,27 +2267,22 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDes return ioDescriptorBuilder_; } + // optional int32 physical_edge_count = 3; private int physicalEdgeCount_ ; /** * optional int32 physical_edge_count = 3; - * @return Whether the physicalEdgeCount field is set. */ - @java.lang.Override public boolean hasPhysicalEdgeCount() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional int32 physical_edge_count = 3; - * @return The physicalEdgeCount. */ - @java.lang.Override public int getPhysicalEdgeCount() { return physicalEdgeCount_; } /** * optional int32 physical_edge_count = 3; - * @param value The physicalEdgeCount to set. - * @return This builder for chaining. 
*/ public Builder setPhysicalEdgeCount(int value) { bitField0_ |= 0x00000004; @@ -2616,7 +2292,6 @@ public Builder setPhysicalEdgeCount(int value) { } /** * optional int32 physical_edge_count = 3; - * @return This builder for chaining. */ public Builder clearPhysicalEdgeCount() { bitField0_ = (bitField0_ & ~0x00000004); @@ -2624,113 +2299,63 @@ public Builder clearPhysicalEdgeCount() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:IOSpecProto) } - // @@protoc_insertion_point(class_scope:IOSpecProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public IOSpecProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new IOSpecProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new IOSpecProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:IOSpecProto) } - public interface GroupInputSpecProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:GroupInputSpecProto) - com.google.protobuf.MessageOrBuilder { + public interface GroupInputSpecProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional string group_name = 1; /** * optional string group_name = 1; - * @return Whether the groupName field is set. */ boolean hasGroupName(); /** * optional string group_name = 1; - * @return The groupName. */ java.lang.String getGroupName(); /** * optional string group_name = 1; - * @return The bytes for groupName. */ com.google.protobuf.ByteString getGroupNameBytes(); + // repeated string group_vertices = 2; /** * repeated string group_vertices = 2; - * @return A list containing the groupVertices. */ java.util.List - getGroupVerticesList(); + getGroupVerticesList(); /** * repeated string group_vertices = 2; - * @return The count of groupVertices. */ int getGroupVerticesCount(); /** * repeated string group_vertices = 2; - * @param index The index of the element to return. - * @return The groupVertices at the given index. */ java.lang.String getGroupVertices(int index); /** * repeated string group_vertices = 2; - * @param index The index of the value to return. - * @return The bytes of the groupVertices at the given index. 
*/ com.google.protobuf.ByteString getGroupVerticesBytes(int index); + // optional .EntityDescriptorProto merged_input_descriptor = 3; /** * optional .EntityDescriptorProto merged_input_descriptor = 3; - * @return Whether the mergedInputDescriptor field is set. */ boolean hasMergedInputDescriptor(); /** * optional .EntityDescriptorProto merged_input_descriptor = 3; - * @return The mergedInputDescriptor. */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getMergedInputDescriptor(); /** @@ -2742,39 +2367,35 @@ public interface GroupInputSpecProtoOrBuilder extends * Protobuf type {@code GroupInputSpecProto} */ public static final class GroupInputSpecProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:GroupInputSpecProto) - GroupInputSpecProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements GroupInputSpecProtoOrBuilder { // Use GroupInputSpecProto.newBuilder() to construct. - private GroupInputSpecProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private GroupInputSpecProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private GroupInputSpecProto() { - groupName_ = ""; - groupVertices_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private GroupInputSpecProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GroupInputSpecProto defaultInstance; + public static GroupInputSpecProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new GroupInputSpecProto(); + public GroupInputSpecProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private GroupInputSpecProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -2786,24 +2407,29 @@ private GroupInputSpecProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - groupName_ = bs; + groupName_ = input.readBytes(); break; } case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); - if (!((mutable_bitField0_ & 0x00000002) != 0)) { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { groupVertices_ = new com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000002; } - groupVertices_.add(bs); + groupVertices_.add(input.readBytes()); break; } case 26: { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder subBuilder = null; - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = 
mergedInputDescriptor_.toBuilder(); } mergedInputDescriptor_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.PARSER, extensionRegistry); @@ -2814,25 +2440,16 @@ private GroupInputSpecProto( bitField0_ |= 0x00000002; break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000002) != 0)) { - groupVertices_ = groupVertices_.getUnmodifiableView(); + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + groupVertices_ = new com.google.protobuf.UnmodifiableLazyStringList(groupVertices_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -2843,30 +2460,41 @@ private GroupInputSpecProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GroupInputSpecProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GroupInputSpecProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GroupInputSpecProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GroupInputSpecProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional string group_name = 1; public static final int GROUP_NAME_FIELD_NUMBER = 1; - private volatile java.lang.Object groupName_; + private java.lang.Object groupName_; /** * optional string group_name = 1; - * @return Whether the groupName field is set. */ - @java.lang.Override public boolean hasGroupName() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string group_name = 1; - * @return The groupName. */ - @java.lang.Override public java.lang.String getGroupName() { java.lang.Object ref = groupName_; if (ref instanceof java.lang.String) { @@ -2883,9 +2511,7 @@ public java.lang.String getGroupName() { } /** * optional string group_name = 1; - * @return The bytes for groupName. 
*/ - @java.lang.Override public com.google.protobuf.ByteString getGroupNameBytes() { java.lang.Object ref = groupName_; @@ -2900,119 +2526,122 @@ public java.lang.String getGroupName() { } } + // repeated string group_vertices = 2; public static final int GROUP_VERTICES_FIELD_NUMBER = 2; private com.google.protobuf.LazyStringList groupVertices_; /** * repeated string group_vertices = 2; - * @return A list containing the groupVertices. */ - public com.google.protobuf.ProtocolStringList + public java.util.List getGroupVerticesList() { return groupVertices_; } /** * repeated string group_vertices = 2; - * @return The count of groupVertices. */ public int getGroupVerticesCount() { return groupVertices_.size(); } /** * repeated string group_vertices = 2; - * @param index The index of the element to return. - * @return The groupVertices at the given index. */ public java.lang.String getGroupVertices(int index) { return groupVertices_.get(index); } /** * repeated string group_vertices = 2; - * @param index The index of the value to return. - * @return The bytes of the groupVertices at the given index. */ public com.google.protobuf.ByteString getGroupVerticesBytes(int index) { return groupVertices_.getByteString(index); } + // optional .EntityDescriptorProto merged_input_descriptor = 3; public static final int MERGED_INPUT_DESCRIPTOR_FIELD_NUMBER = 3; private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto mergedInputDescriptor_; /** * optional .EntityDescriptorProto merged_input_descriptor = 3; - * @return Whether the mergedInputDescriptor field is set. */ - @java.lang.Override public boolean hasMergedInputDescriptor() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional .EntityDescriptorProto merged_input_descriptor = 3; - * @return The mergedInputDescriptor. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getMergedInputDescriptor() { - return mergedInputDescriptor_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance() : mergedInputDescriptor_; + return mergedInputDescriptor_; } /** * optional .EntityDescriptorProto merged_input_descriptor = 3; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getMergedInputDescriptorOrBuilder() { - return mergedInputDescriptor_ == null ? 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance() : mergedInputDescriptor_; + return mergedInputDescriptor_; } + private void initFields() { + groupName_ = ""; + groupVertices_ = com.google.protobuf.LazyStringArrayList.EMPTY; + mergedInputDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance(); + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, groupName_); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getGroupNameBytes()); } for (int i = 0; i < groupVertices_.size(); i++) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, groupVertices_.getRaw(i)); + output.writeBytes(2, groupVertices_.getByteString(i)); } - if (((bitField0_ & 0x00000002) != 0)) { - output.writeMessage(3, getMergedInputDescriptor()); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(3, mergedInputDescriptor_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, groupName_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getGroupNameBytes()); } { int dataSize = 0; for (int i = 0; i < groupVertices_.size(); i++) { - dataSize += computeStringSizeNoTag(groupVertices_.getRaw(i)); + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(groupVertices_.getByteString(i)); } size += dataSize; size += 1 * getGroupVerticesList().size(); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, getMergedInputDescriptor()); + .computeMessageSize(3, mergedInputDescriptor_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -3023,29 +2652,32 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto) obj; - if (hasGroupName() != other.hasGroupName()) return false; + boolean result = true; + result = result && (hasGroupName() == other.hasGroupName()); if (hasGroupName()) { - if (!getGroupName() - .equals(other.getGroupName())) return false; + 
result = result && getGroupName() + .equals(other.getGroupName()); } - if (!getGroupVerticesList() - .equals(other.getGroupVerticesList())) return false; - if (hasMergedInputDescriptor() != other.hasMergedInputDescriptor()) return false; + result = result && getGroupVerticesList() + .equals(other.getGroupVerticesList()); + result = result && (hasMergedInputDescriptor() == other.hasMergedInputDescriptor()); if (hasMergedInputDescriptor()) { - if (!getMergedInputDescriptor() - .equals(other.getMergedInputDescriptor())) return false; + result = result && getMergedInputDescriptor() + .equals(other.getMergedInputDescriptor()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasGroupName()) { hash = (37 * hash) + GROUP_NAME_FIELD_NUMBER; hash = (53 * hash) + getGroupName().hashCode(); @@ -3058,22 +2690,11 @@ public int hashCode() { hash = (37 * hash) + MERGED_INPUT_DESCRIPTOR_FIELD_NUMBER; hash = (53 * hash) + getMergedInputDescriptor().hashCode(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -3097,59 +2718,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Gr } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -3157,16 +2765,14 @@ protected Builder newBuilderForType( * Protobuf type {@code GroupInputSpecProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:GroupInputSpecProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GroupInputSpecProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GroupInputSpecProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -3179,17 +2785,19 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getMergedInputDescriptorFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); groupName_ = 
""; @@ -3197,7 +2805,7 @@ public Builder clear() { groupVertices_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00000002); if (mergedInputDescriptorBuilder_ == null) { - mergedInputDescriptor_ = null; + mergedInputDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance(); } else { mergedInputDescriptorBuilder_.clear(); } @@ -3205,18 +2813,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GroupInputSpecProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto result = buildPartial(); if (!result.isInitialized()) { @@ -3225,66 +2834,33 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInpu return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.groupName_ = groupName_; - if (((bitField0_ & 0x00000002) != 0)) { - groupVertices_ = groupVertices_.getUnmodifiableView(); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + groupVertices_ = new com.google.protobuf.UnmodifiableLazyStringList( + groupVertices_); bitField0_ = (bitField0_ & ~0x00000002); } result.groupVertices_ = groupVertices_; - if (((from_bitField0_ & 0x00000004) != 0)) { - if (mergedInputDescriptorBuilder_ == null) { - result.mergedInputDescriptor_ = mergedInputDescriptor_; - } else { - result.mergedInputDescriptor_ = mergedInputDescriptorBuilder_.build(); - } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000002; } + if (mergedInputDescriptorBuilder_ == null) { + result.mergedInputDescriptor_ = mergedInputDescriptor_; + } else { + result.mergedInputDescriptor_ = mergedInputDescriptorBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, 
java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto)other); @@ -3314,17 +2890,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasMergedInputDescriptor()) { mergeMergedInputDescriptor(other.getMergedInputDescriptor()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -3334,7 +2907,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -3344,27 +2917,23 @@ public Builder mergeFrom( } private int bitField0_; + // optional string group_name = 1; private java.lang.Object groupName_ = ""; /** * optional string group_name = 1; - * @return Whether the groupName field is set. */ public boolean hasGroupName() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string group_name = 1; - * @return The groupName. */ public java.lang.String getGroupName() { java.lang.Object ref = groupName_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - groupName_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + groupName_ = s; return s; } else { return (java.lang.String) ref; @@ -3372,7 +2941,6 @@ public java.lang.String getGroupName() { } /** * optional string group_name = 1; - * @return The bytes for groupName. */ public com.google.protobuf.ByteString getGroupNameBytes() { @@ -3389,8 +2957,6 @@ public java.lang.String getGroupName() { } /** * optional string group_name = 1; - * @param value The groupName to set. - * @return This builder for chaining. */ public Builder setGroupName( java.lang.String value) { @@ -3404,7 +2970,6 @@ public Builder setGroupName( } /** * optional string group_name = 1; - * @return This builder for chaining. */ public Builder clearGroupName() { bitField0_ = (bitField0_ & ~0x00000001); @@ -3414,8 +2979,6 @@ public Builder clearGroupName() { } /** * optional string group_name = 1; - * @param value The bytes for groupName to set. - * @return This builder for chaining. 
*/ public Builder setGroupNameBytes( com.google.protobuf.ByteString value) { @@ -3428,40 +2991,35 @@ public Builder setGroupNameBytes( return this; } + // repeated string group_vertices = 2; private com.google.protobuf.LazyStringList groupVertices_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureGroupVerticesIsMutable() { - if (!((bitField0_ & 0x00000002) != 0)) { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { groupVertices_ = new com.google.protobuf.LazyStringArrayList(groupVertices_); bitField0_ |= 0x00000002; } } /** * repeated string group_vertices = 2; - * @return A list containing the groupVertices. */ - public com.google.protobuf.ProtocolStringList + public java.util.List getGroupVerticesList() { - return groupVertices_.getUnmodifiableView(); + return java.util.Collections.unmodifiableList(groupVertices_); } /** * repeated string group_vertices = 2; - * @return The count of groupVertices. */ public int getGroupVerticesCount() { return groupVertices_.size(); } /** * repeated string group_vertices = 2; - * @param index The index of the element to return. - * @return The groupVertices at the given index. */ public java.lang.String getGroupVertices(int index) { return groupVertices_.get(index); } /** * repeated string group_vertices = 2; - * @param index The index of the value to return. - * @return The bytes of the groupVertices at the given index. */ public com.google.protobuf.ByteString getGroupVerticesBytes(int index) { @@ -3469,9 +3027,6 @@ public java.lang.String getGroupVertices(int index) { } /** * repeated string group_vertices = 2; - * @param index The index to set the value at. - * @param value The groupVertices to set. - * @return This builder for chaining. */ public Builder setGroupVertices( int index, java.lang.String value) { @@ -3485,8 +3040,6 @@ public Builder setGroupVertices( } /** * repeated string group_vertices = 2; - * @param value The groupVertices to add. - * @return This builder for chaining. */ public Builder addGroupVertices( java.lang.String value) { @@ -3500,20 +3053,16 @@ public Builder addGroupVertices( } /** * repeated string group_vertices = 2; - * @param values The groupVertices to add. - * @return This builder for chaining. */ public Builder addAllGroupVertices( java.lang.Iterable values) { ensureGroupVerticesIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, groupVertices_); + super.addAll(values, groupVertices_); onChanged(); return this; } /** * repeated string group_vertices = 2; - * @return This builder for chaining. */ public Builder clearGroupVertices() { groupVertices_ = com.google.protobuf.LazyStringArrayList.EMPTY; @@ -3523,8 +3072,6 @@ public Builder clearGroupVertices() { } /** * repeated string group_vertices = 2; - * @param value The bytes of the groupVertices to add. - * @return This builder for chaining. 
*/ public Builder addGroupVerticesBytes( com.google.protobuf.ByteString value) { @@ -3537,23 +3084,22 @@ public Builder addGroupVerticesBytes( return this; } - private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto mergedInputDescriptor_; - private com.google.protobuf.SingleFieldBuilderV3< + // optional .EntityDescriptorProto merged_input_descriptor = 3; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto mergedInputDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder> mergedInputDescriptorBuilder_; /** * optional .EntityDescriptorProto merged_input_descriptor = 3; - * @return Whether the mergedInputDescriptor field is set. */ public boolean hasMergedInputDescriptor() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .EntityDescriptorProto merged_input_descriptor = 3; - * @return The mergedInputDescriptor. */ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getMergedInputDescriptor() { if (mergedInputDescriptorBuilder_ == null) { - return mergedInputDescriptor_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance() : mergedInputDescriptor_; + return mergedInputDescriptor_; } else { return mergedInputDescriptorBuilder_.getMessage(); } @@ -3593,8 +3139,7 @@ public Builder setMergedInputDescriptor( */ public Builder mergeMergedInputDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto value) { if (mergedInputDescriptorBuilder_ == null) { - if (((bitField0_ & 0x00000004) != 0) && - mergedInputDescriptor_ != null && + if (((bitField0_ & 0x00000004) == 0x00000004) && mergedInputDescriptor_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance()) { mergedInputDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.newBuilder(mergedInputDescriptor_).mergeFrom(value).buildPartial(); @@ -3613,7 +3158,7 @@ public Builder mergeMergedInputDescriptor(org.apache.hadoop.hive.llap.daemon.rpc */ public Builder clearMergedInputDescriptor() { if (mergedInputDescriptorBuilder_ == null) { - mergedInputDescriptor_ = null; + mergedInputDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance(); onChanged(); } else { mergedInputDescriptorBuilder_.clear(); @@ -3636,119 +3181,72 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDes if (mergedInputDescriptorBuilder_ != null) { return mergedInputDescriptorBuilder_.getMessageOrBuilder(); } else { - return mergedInputDescriptor_ == null ? 
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance() : mergedInputDescriptor_; + return mergedInputDescriptor_; } } /** * optional .EntityDescriptorProto merged_input_descriptor = 3; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder> getMergedInputDescriptorFieldBuilder() { if (mergedInputDescriptorBuilder_ == null) { - mergedInputDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + mergedInputDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder>( - getMergedInputDescriptor(), + mergedInputDescriptor_, getParentForChildren(), isClean()); mergedInputDescriptor_ = null; } return mergedInputDescriptorBuilder_; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:GroupInputSpecProto) } - // @@protoc_insertion_point(class_scope:GroupInputSpecProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public GroupInputSpecProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new GroupInputSpecProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new GroupInputSpecProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:GroupInputSpecProto) } - public interface SignableVertexSpecOrBuilder extends - // @@protoc_insertion_point(interface_extends:SignableVertexSpec) - com.google.protobuf.MessageOrBuilder { + public interface SignableVertexSpecOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional string user = 1; /** * optional string user = 1; - * @return Whether the user field is set. 
*/ boolean hasUser(); /** * optional string user = 1; - * @return The user. */ java.lang.String getUser(); /** * optional string user = 1; - * @return The bytes for user. */ com.google.protobuf.ByteString getUserBytes(); + // optional int64 signatureKeyId = 2; /** * optional int64 signatureKeyId = 2; - * @return Whether the signatureKeyId field is set. */ boolean hasSignatureKeyId(); /** * optional int64 signatureKeyId = 2; - * @return The signatureKeyId. */ long getSignatureKeyId(); + // optional .QueryIdentifierProto query_identifier = 3; /** * optional .QueryIdentifierProto query_identifier = 3; - * @return Whether the queryIdentifier field is set. */ boolean hasQueryIdentifier(); /** * optional .QueryIdentifierProto query_identifier = 3; - * @return The queryIdentifier. */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier(); /** @@ -3756,117 +3254,107 @@ public interface SignableVertexSpecOrBuilder extends */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder(); + // optional string hive_query_id = 4; /** * optional string hive_query_id = 4; - * @return Whether the hiveQueryId field is set. */ boolean hasHiveQueryId(); /** * optional string hive_query_id = 4; - * @return The hiveQueryId. */ java.lang.String getHiveQueryId(); /** * optional string hive_query_id = 4; - * @return The bytes for hiveQueryId. */ com.google.protobuf.ByteString getHiveQueryIdBytes(); + // optional string dag_name = 5; /** + * optional string dag_name = 5; + * *
      * Display names cannot be modified by the client for now. If needed, they should be sent to HS2 who will put them here.
      * </pre>
- * - * optional string dag_name = 5; - * @return Whether the dagName field is set. */ boolean hasDagName(); /** + * optional string dag_name = 5; + * *
      * Display names cannot be modified by the client for now. If needed, they should be sent to HS2 who will put them here.
      * </pre>
- * - * optional string dag_name = 5; - * @return The dagName. */ java.lang.String getDagName(); /** + * optional string dag_name = 5; + * *
      * Display names cannot be modified by the client for now. If needed, they should be sent to HS2 who will put them here.
      * </pre>
- * - * optional string dag_name = 5; - * @return The bytes for dagName. */ com.google.protobuf.ByteString getDagNameBytes(); + // optional string vertex_name = 6; /** * optional string vertex_name = 6; - * @return Whether the vertexName field is set. */ boolean hasVertexName(); /** * optional string vertex_name = 6; - * @return The vertexName. */ java.lang.String getVertexName(); /** * optional string vertex_name = 6; - * @return The bytes for vertexName. */ com.google.protobuf.ByteString getVertexNameBytes(); + // optional int32 vertex_index = 7; /** * optional int32 vertex_index = 7; - * @return Whether the vertexIndex field is set. */ boolean hasVertexIndex(); /** * optional int32 vertex_index = 7; - * @return The vertexIndex. */ int getVertexIndex(); + // optional string token_identifier = 8; /** + * optional string token_identifier = 8; + * *
      * The core vertex stuff 
      * </pre>
- * - * optional string token_identifier = 8; - * @return Whether the tokenIdentifier field is set. */ boolean hasTokenIdentifier(); /** + * optional string token_identifier = 8; + * *
      * The core vertex stuff 
      * </pre>
- * - * optional string token_identifier = 8; - * @return The tokenIdentifier. */ java.lang.String getTokenIdentifier(); /** + * optional string token_identifier = 8; + * *
      * The core vertex stuff 
      * </pre>
- * - * optional string token_identifier = 8; - * @return The bytes for tokenIdentifier. */ com.google.protobuf.ByteString getTokenIdentifierBytes(); + // optional .EntityDescriptorProto processor_descriptor = 9; /** * optional .EntityDescriptorProto processor_descriptor = 9; - * @return Whether the processorDescriptor field is set. */ boolean hasProcessorDescriptor(); /** * optional .EntityDescriptorProto processor_descriptor = 9; - * @return The processorDescriptor. */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getProcessorDescriptor(); /** @@ -3874,6 +3362,7 @@ public interface SignableVertexSpecOrBuilder extends */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getProcessorDescriptorOrBuilder(); + // repeated .IOSpecProto input_specs = 10; /** * repeated .IOSpecProto input_specs = 10; */ @@ -3898,6 +3387,7 @@ public interface SignableVertexSpecOrBuilder extends org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder getInputSpecsOrBuilder( int index); + // repeated .IOSpecProto output_specs = 11; /** * repeated .IOSpecProto output_specs = 11; */ @@ -3922,6 +3412,7 @@ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBui org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder getOutputSpecsOrBuilder( int index); + // repeated .GroupInputSpecProto grouped_input_specs = 12; /** * repeated .GroupInputSpecProto grouped_input_specs = 12; */ @@ -3946,83 +3437,71 @@ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBui org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder getGroupedInputSpecsOrBuilder( int index); + // optional int32 vertex_parallelism = 13; /** + * optional int32 vertex_parallelism = 13; + * *
      * An internal field required for Tez.
      * </pre>
- * - * optional int32 vertex_parallelism = 13; - * @return Whether the vertexParallelism field is set. */ boolean hasVertexParallelism(); /** + * optional int32 vertex_parallelism = 13; + * *
      * An internal field required for Tez.
      * </pre>
- * - * optional int32 vertex_parallelism = 13; - * @return The vertexParallelism. */ int getVertexParallelism(); + // optional bool is_external_submission = 14 [default = false]; /** * optional bool is_external_submission = 14 [default = false]; - * @return Whether the isExternalSubmission field is set. */ boolean hasIsExternalSubmission(); /** * optional bool is_external_submission = 14 [default = false]; - * @return The isExternalSubmission. */ boolean getIsExternalSubmission(); } /** + * Protobuf type {@code SignableVertexSpec} + * *
    * The part of SubmitWork that can be signed 
    * </pre>
- * - * Protobuf type {@code SignableVertexSpec} */ public static final class SignableVertexSpec extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:SignableVertexSpec) - SignableVertexSpecOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements SignableVertexSpecOrBuilder { // Use SignableVertexSpec.newBuilder() to construct. - private SignableVertexSpec(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private SignableVertexSpec(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private SignableVertexSpec() { - user_ = ""; - hiveQueryId_ = ""; - dagName_ = ""; - vertexName_ = ""; - tokenIdentifier_ = ""; - inputSpecs_ = java.util.Collections.emptyList(); - outputSpecs_ = java.util.Collections.emptyList(); - groupedInputSpecs_ = java.util.Collections.emptyList(); + private SignableVertexSpec(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SignableVertexSpec defaultInstance; + public static SignableVertexSpec getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new SignableVertexSpec(); + public SignableVertexSpec getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private SignableVertexSpec( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -4034,10 +3513,16 @@ private SignableVertexSpec( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - user_ = bs; + user_ = input.readBytes(); break; } case 16: { @@ -4047,7 +3532,7 @@ private SignableVertexSpec( } case 26: { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder subBuilder = null; - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { subBuilder = queryIdentifier_.toBuilder(); } queryIdentifier_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.PARSER, extensionRegistry); @@ -4059,21 +3544,18 @@ private SignableVertexSpec( break; } case 34: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000008; - hiveQueryId_ = bs; + hiveQueryId_ = input.readBytes(); break; } case 42: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000010; - dagName_ = bs; + dagName_ = input.readBytes(); break; } case 50: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000020; - vertexName_ = bs; + vertexName_ = input.readBytes(); break; } case 56: { @@ -4082,14 +3564,13 @@ private 
SignableVertexSpec( break; } case 66: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000080; - tokenIdentifier_ = bs; + tokenIdentifier_ = input.readBytes(); break; } case 74: { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder subBuilder = null; - if (((bitField0_ & 0x00000100) != 0)) { + if (((bitField0_ & 0x00000100) == 0x00000100)) { subBuilder = processorDescriptor_.toBuilder(); } processorDescriptor_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.PARSER, extensionRegistry); @@ -4101,30 +3582,27 @@ private SignableVertexSpec( break; } case 82: { - if (!((mutable_bitField0_ & 0x00000200) != 0)) { + if (!((mutable_bitField0_ & 0x00000200) == 0x00000200)) { inputSpecs_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000200; } - inputSpecs_.add( - input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.PARSER, extensionRegistry)); + inputSpecs_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.PARSER, extensionRegistry)); break; } case 90: { - if (!((mutable_bitField0_ & 0x00000400) != 0)) { + if (!((mutable_bitField0_ & 0x00000400) == 0x00000400)) { outputSpecs_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000400; } - outputSpecs_.add( - input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.PARSER, extensionRegistry)); + outputSpecs_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.PARSER, extensionRegistry)); break; } case 98: { - if (!((mutable_bitField0_ & 0x00000800) != 0)) { + if (!((mutable_bitField0_ & 0x00000800) == 0x00000800)) { groupedInputSpecs_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000800; } - groupedInputSpecs_.add( - input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.PARSER, extensionRegistry)); + groupedInputSpecs_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.PARSER, extensionRegistry)); break; } case 104: { @@ -4137,30 +3615,21 @@ private SignableVertexSpec( isExternalSubmission_ = input.readBool(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000200) != 0)) { + if (((mutable_bitField0_ & 0x00000200) == 0x00000200)) { inputSpecs_ = java.util.Collections.unmodifiableList(inputSpecs_); } - if (((mutable_bitField0_ & 0x00000400) != 0)) { + if (((mutable_bitField0_ & 0x00000400) == 0x00000400)) { outputSpecs_ = java.util.Collections.unmodifiableList(outputSpecs_); } - if (((mutable_bitField0_ & 0x00000800) != 0)) { + if (((mutable_bitField0_ & 0x00000800) == 0x00000800)) { groupedInputSpecs_ = java.util.Collections.unmodifiableList(groupedInputSpecs_); } this.unknownFields = unknownFields.build(); @@ -4172,30 +3641,41 @@ private SignableVertexSpec( return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SignableVertexSpec_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SignableVertexSpec_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SignableVertexSpec parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SignableVertexSpec(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional string user = 1; public static final int USER_FIELD_NUMBER = 1; - private volatile java.lang.Object user_; + private java.lang.Object user_; /** * optional string user = 1; - * @return Whether the user field is set. */ - @java.lang.Override public boolean hasUser() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string user = 1; - * @return The user. */ - @java.lang.Override public java.lang.String getUser() { java.lang.Object ref = user_; if (ref instanceof java.lang.String) { @@ -4212,9 +3692,7 @@ public java.lang.String getUser() { } /** * optional string user = 1; - * @return The bytes for user. */ - @java.lang.Override public com.google.protobuf.ByteString getUserBytes() { java.lang.Object ref = user_; @@ -4229,66 +3707,56 @@ public java.lang.String getUser() { } } + // optional int64 signatureKeyId = 2; public static final int SIGNATUREKEYID_FIELD_NUMBER = 2; private long signatureKeyId_; /** * optional int64 signatureKeyId = 2; - * @return Whether the signatureKeyId field is set. */ - @java.lang.Override public boolean hasSignatureKeyId() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional int64 signatureKeyId = 2; - * @return The signatureKeyId. */ - @java.lang.Override public long getSignatureKeyId() { return signatureKeyId_; } + // optional .QueryIdentifierProto query_identifier = 3; public static final int QUERY_IDENTIFIER_FIELD_NUMBER = 3; private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_; /** * optional .QueryIdentifierProto query_identifier = 3; - * @return Whether the queryIdentifier field is set. */ - @java.lang.Override public boolean hasQueryIdentifier() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .QueryIdentifierProto query_identifier = 3; - * @return The queryIdentifier. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() { - return queryIdentifier_ == null ? 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } /** * optional .QueryIdentifierProto query_identifier = 3; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() { - return queryIdentifier_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } + // optional string hive_query_id = 4; public static final int HIVE_QUERY_ID_FIELD_NUMBER = 4; - private volatile java.lang.Object hiveQueryId_; + private java.lang.Object hiveQueryId_; /** * optional string hive_query_id = 4; - * @return Whether the hiveQueryId field is set. */ - @java.lang.Override public boolean hasHiveQueryId() { - return ((bitField0_ & 0x00000008) != 0); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional string hive_query_id = 4; - * @return The hiveQueryId. */ - @java.lang.Override public java.lang.String getHiveQueryId() { java.lang.Object ref = hiveQueryId_; if (ref instanceof java.lang.String) { @@ -4305,9 +3773,7 @@ public java.lang.String getHiveQueryId() { } /** * optional string hive_query_id = 4; - * @return The bytes for hiveQueryId. */ - @java.lang.Override public com.google.protobuf.ByteString getHiveQueryIdBytes() { java.lang.Object ref = hiveQueryId_; @@ -4322,29 +3788,26 @@ public java.lang.String getHiveQueryId() { } } + // optional string dag_name = 5; public static final int DAG_NAME_FIELD_NUMBER = 5; - private volatile java.lang.Object dagName_; + private java.lang.Object dagName_; /** + * optional string dag_name = 5; + * *
      * Display names cannot be modified by the client for now. If needed, they should be sent to HS2 who will put them here.
      * </pre>
- * - * optional string dag_name = 5; - * @return Whether the dagName field is set. */ - @java.lang.Override public boolean hasDagName() { - return ((bitField0_ & 0x00000010) != 0); + return ((bitField0_ & 0x00000010) == 0x00000010); } /** + * optional string dag_name = 5; + * *
      * Display names cannot be modified by the client for now. If needed, they should be sent to HS2 who will put them here.
      * </pre>
- * - * optional string dag_name = 5; - * @return The dagName. */ - @java.lang.Override public java.lang.String getDagName() { java.lang.Object ref = dagName_; if (ref instanceof java.lang.String) { @@ -4360,14 +3823,12 @@ public java.lang.String getDagName() { } } /** + * optional string dag_name = 5; + * *
      * Display names cannot be modified by the client for now. If needed, they should be sent to HS2 who will put them here.
      * </pre>
- * - * optional string dag_name = 5; - * @return The bytes for dagName. */ - @java.lang.Override public com.google.protobuf.ByteString getDagNameBytes() { java.lang.Object ref = dagName_; @@ -4382,21 +3843,18 @@ public java.lang.String getDagName() { } } + // optional string vertex_name = 6; public static final int VERTEX_NAME_FIELD_NUMBER = 6; - private volatile java.lang.Object vertexName_; + private java.lang.Object vertexName_; /** * optional string vertex_name = 6; - * @return Whether the vertexName field is set. */ - @java.lang.Override public boolean hasVertexName() { - return ((bitField0_ & 0x00000020) != 0); + return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional string vertex_name = 6; - * @return The vertexName. */ - @java.lang.Override public java.lang.String getVertexName() { java.lang.Object ref = vertexName_; if (ref instanceof java.lang.String) { @@ -4413,9 +3871,7 @@ public java.lang.String getVertexName() { } /** * optional string vertex_name = 6; - * @return The bytes for vertexName. */ - @java.lang.Override public com.google.protobuf.ByteString getVertexNameBytes() { java.lang.Object ref = vertexName_; @@ -4430,48 +3886,42 @@ public java.lang.String getVertexName() { } } + // optional int32 vertex_index = 7; public static final int VERTEX_INDEX_FIELD_NUMBER = 7; private int vertexIndex_; /** * optional int32 vertex_index = 7; - * @return Whether the vertexIndex field is set. */ - @java.lang.Override public boolean hasVertexIndex() { - return ((bitField0_ & 0x00000040) != 0); + return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional int32 vertex_index = 7; - * @return The vertexIndex. */ - @java.lang.Override public int getVertexIndex() { return vertexIndex_; } + // optional string token_identifier = 8; public static final int TOKEN_IDENTIFIER_FIELD_NUMBER = 8; - private volatile java.lang.Object tokenIdentifier_; + private java.lang.Object tokenIdentifier_; /** + * optional string token_identifier = 8; + * *
      * The core vertex stuff 
      * </pre>
- * - * optional string token_identifier = 8; - * @return Whether the tokenIdentifier field is set. */ - @java.lang.Override public boolean hasTokenIdentifier() { - return ((bitField0_ & 0x00000080) != 0); + return ((bitField0_ & 0x00000080) == 0x00000080); } /** + * optional string token_identifier = 8; + * *
      * The core vertex stuff 
      * </pre>
- * - * optional string token_identifier = 8; - * @return The tokenIdentifier. */ - @java.lang.Override public java.lang.String getTokenIdentifier() { java.lang.Object ref = tokenIdentifier_; if (ref instanceof java.lang.String) { @@ -4487,14 +3937,12 @@ public java.lang.String getTokenIdentifier() { } } /** + * optional string token_identifier = 8; + * *
      * The core vertex stuff 
      * </pre>
- * - * optional string token_identifier = 8; - * @return The bytes for tokenIdentifier. */ - @java.lang.Override public com.google.protobuf.ByteString getTokenIdentifierBytes() { java.lang.Object ref = tokenIdentifier_; @@ -4509,45 +3957,40 @@ public java.lang.String getTokenIdentifier() { } } + // optional .EntityDescriptorProto processor_descriptor = 9; public static final int PROCESSOR_DESCRIPTOR_FIELD_NUMBER = 9; private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto processorDescriptor_; /** * optional .EntityDescriptorProto processor_descriptor = 9; - * @return Whether the processorDescriptor field is set. */ - @java.lang.Override public boolean hasProcessorDescriptor() { - return ((bitField0_ & 0x00000100) != 0); + return ((bitField0_ & 0x00000100) == 0x00000100); } /** * optional .EntityDescriptorProto processor_descriptor = 9; - * @return The processorDescriptor. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getProcessorDescriptor() { - return processorDescriptor_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance() : processorDescriptor_; + return processorDescriptor_; } /** * optional .EntityDescriptorProto processor_descriptor = 9; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder getProcessorDescriptorOrBuilder() { - return processorDescriptor_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance() : processorDescriptor_; + return processorDescriptor_; } + // repeated .IOSpecProto input_specs = 10; public static final int INPUT_SPECS_FIELD_NUMBER = 10; private java.util.List inputSpecs_; /** * repeated .IOSpecProto input_specs = 10; */ - @java.lang.Override public java.util.List getInputSpecsList() { return inputSpecs_; } /** * repeated .IOSpecProto input_specs = 10; */ - @java.lang.Override public java.util.List getInputSpecsOrBuilderList() { return inputSpecs_; @@ -4555,39 +3998,35 @@ public java.util.Listrepeated .IOSpecProto input_specs = 10; */ - @java.lang.Override public int getInputSpecsCount() { return inputSpecs_.size(); } /** * repeated .IOSpecProto input_specs = 10; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getInputSpecs(int index) { return inputSpecs_.get(index); } /** * repeated .IOSpecProto input_specs = 10; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder getInputSpecsOrBuilder( int index) { return inputSpecs_.get(index); } + // repeated .IOSpecProto output_specs = 11; public static final int OUTPUT_SPECS_FIELD_NUMBER = 11; private java.util.List outputSpecs_; /** * repeated .IOSpecProto output_specs = 11; */ - @java.lang.Override public java.util.List getOutputSpecsList() { return outputSpecs_; } /** * repeated .IOSpecProto output_specs = 11; */ - @java.lang.Override public java.util.List getOutputSpecsOrBuilderList() { return outputSpecs_; @@ -4595,39 +4034,35 @@ public java.util.Listrepeated .IOSpecProto output_specs = 11; */ - @java.lang.Override public int getOutputSpecsCount() { return outputSpecs_.size(); } /** * repeated .IOSpecProto output_specs = 11; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto getOutputSpecs(int index) { return 
outputSpecs_.get(index); } /** * repeated .IOSpecProto output_specs = 11; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder getOutputSpecsOrBuilder( int index) { return outputSpecs_.get(index); } + // repeated .GroupInputSpecProto grouped_input_specs = 12; public static final int GROUPED_INPUT_SPECS_FIELD_NUMBER = 12; private java.util.List groupedInputSpecs_; /** * repeated .GroupInputSpecProto grouped_input_specs = 12; */ - @java.lang.Override public java.util.List getGroupedInputSpecsList() { return groupedInputSpecs_; } /** * repeated .GroupInputSpecProto grouped_input_specs = 12; */ - @java.lang.Override public java.util.List getGroupedInputSpecsOrBuilderList() { return groupedInputSpecs_; @@ -4635,112 +4070,117 @@ public java.util.Listrepeated .GroupInputSpecProto grouped_input_specs = 12; */ - @java.lang.Override public int getGroupedInputSpecsCount() { return groupedInputSpecs_.size(); } /** * repeated .GroupInputSpecProto grouped_input_specs = 12; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto getGroupedInputSpecs(int index) { return groupedInputSpecs_.get(index); } /** * repeated .GroupInputSpecProto grouped_input_specs = 12; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder getGroupedInputSpecsOrBuilder( int index) { return groupedInputSpecs_.get(index); } + // optional int32 vertex_parallelism = 13; public static final int VERTEX_PARALLELISM_FIELD_NUMBER = 13; private int vertexParallelism_; /** + * optional int32 vertex_parallelism = 13; + * *
      * An internal field required for Tez.
      * </pre>
- * - * optional int32 vertex_parallelism = 13; - * @return Whether the vertexParallelism field is set. */ - @java.lang.Override public boolean hasVertexParallelism() { - return ((bitField0_ & 0x00000200) != 0); + return ((bitField0_ & 0x00000200) == 0x00000200); } /** + * optional int32 vertex_parallelism = 13; + * *
      * An internal field required for Tez.
      * </pre>
- * - * optional int32 vertex_parallelism = 13; - * @return The vertexParallelism. */ - @java.lang.Override public int getVertexParallelism() { return vertexParallelism_; } + // optional bool is_external_submission = 14 [default = false]; public static final int IS_EXTERNAL_SUBMISSION_FIELD_NUMBER = 14; private boolean isExternalSubmission_; /** * optional bool is_external_submission = 14 [default = false]; - * @return Whether the isExternalSubmission field is set. */ - @java.lang.Override public boolean hasIsExternalSubmission() { - return ((bitField0_ & 0x00000400) != 0); + return ((bitField0_ & 0x00000400) == 0x00000400); } /** * optional bool is_external_submission = 14 [default = false]; - * @return The isExternalSubmission. */ - @java.lang.Override public boolean getIsExternalSubmission() { return isExternalSubmission_; } + private void initFields() { + user_ = ""; + signatureKeyId_ = 0L; + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); + hiveQueryId_ = ""; + dagName_ = ""; + vertexName_ = ""; + vertexIndex_ = 0; + tokenIdentifier_ = ""; + processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance(); + inputSpecs_ = java.util.Collections.emptyList(); + outputSpecs_ = java.util.Collections.emptyList(); + groupedInputSpecs_ = java.util.Collections.emptyList(); + vertexParallelism_ = 0; + isExternalSubmission_ = false; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, user_); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getUserBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeInt64(2, signatureKeyId_); } - if (((bitField0_ & 0x00000004) != 0)) { - output.writeMessage(3, getQueryIdentifier()); + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, queryIdentifier_); } - if (((bitField0_ & 0x00000008) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 4, hiveQueryId_); + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, getHiveQueryIdBytes()); } - if (((bitField0_ & 0x00000010) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 5, dagName_); + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBytes(5, getDagNameBytes()); } - if (((bitField0_ & 0x00000020) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 6, vertexName_); + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBytes(6, getVertexNameBytes()); } - if (((bitField0_ & 0x00000040) != 0)) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeInt32(7, vertexIndex_); } - if (((bitField0_ & 0x00000080) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 8, tokenIdentifier_); + if (((bitField0_ & 0x00000080) == 0x00000080)) { + output.writeBytes(8, getTokenIdentifierBytes()); } - if (((bitField0_ & 0x00000100) != 0)) { - 
output.writeMessage(9, getProcessorDescriptor()); + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeMessage(9, processorDescriptor_); } for (int i = 0; i < inputSpecs_.size(); i++) { output.writeMessage(10, inputSpecs_.get(i)); @@ -4751,51 +4191,56 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) for (int i = 0; i < groupedInputSpecs_.size(); i++) { output.writeMessage(12, groupedInputSpecs_.get(i)); } - if (((bitField0_ & 0x00000200) != 0)) { + if (((bitField0_ & 0x00000200) == 0x00000200)) { output.writeInt32(13, vertexParallelism_); } - if (((bitField0_ & 0x00000400) != 0)) { + if (((bitField0_ & 0x00000400) == 0x00000400)) { output.writeBool(14, isExternalSubmission_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, user_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getUserBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(2, signatureKeyId_); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, getQueryIdentifier()); + .computeMessageSize(3, queryIdentifier_); } - if (((bitField0_ & 0x00000008) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, hiveQueryId_); + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, getHiveQueryIdBytes()); } - if (((bitField0_ & 0x00000010) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, dagName_); + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, getDagNameBytes()); } - if (((bitField0_ & 0x00000020) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, vertexName_); + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(6, getVertexNameBytes()); } - if (((bitField0_ & 0x00000040) != 0)) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(7, vertexIndex_); } - if (((bitField0_ & 0x00000080) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(8, tokenIdentifier_); + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(8, getTokenIdentifierBytes()); } - if (((bitField0_ & 0x00000100) != 0)) { + if (((bitField0_ & 0x00000100) == 0x00000100)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(9, getProcessorDescriptor()); + .computeMessageSize(9, processorDescriptor_); } for (int i = 0; i < inputSpecs_.size(); i++) { size += com.google.protobuf.CodedOutputStream @@ -4809,19 +4254,26 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(12, groupedInputSpecs_.get(i)); } - if (((bitField0_ & 0x00000200) != 0)) { + if (((bitField0_ & 0x00000200) == 0x00000200)) { size += 
com.google.protobuf.CodedOutputStream .computeInt32Size(13, vertexParallelism_); } - if (((bitField0_ & 0x00000400) != 0)) { + if (((bitField0_ & 0x00000400) == 0x00000400)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(14, isExternalSubmission_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -4832,86 +4284,88 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec) obj; - if (hasUser() != other.hasUser()) return false; + boolean result = true; + result = result && (hasUser() == other.hasUser()); if (hasUser()) { - if (!getUser() - .equals(other.getUser())) return false; + result = result && getUser() + .equals(other.getUser()); } - if (hasSignatureKeyId() != other.hasSignatureKeyId()) return false; + result = result && (hasSignatureKeyId() == other.hasSignatureKeyId()); if (hasSignatureKeyId()) { - if (getSignatureKeyId() - != other.getSignatureKeyId()) return false; + result = result && (getSignatureKeyId() + == other.getSignatureKeyId()); } - if (hasQueryIdentifier() != other.hasQueryIdentifier()) return false; + result = result && (hasQueryIdentifier() == other.hasQueryIdentifier()); if (hasQueryIdentifier()) { - if (!getQueryIdentifier() - .equals(other.getQueryIdentifier())) return false; + result = result && getQueryIdentifier() + .equals(other.getQueryIdentifier()); } - if (hasHiveQueryId() != other.hasHiveQueryId()) return false; + result = result && (hasHiveQueryId() == other.hasHiveQueryId()); if (hasHiveQueryId()) { - if (!getHiveQueryId() - .equals(other.getHiveQueryId())) return false; + result = result && getHiveQueryId() + .equals(other.getHiveQueryId()); } - if (hasDagName() != other.hasDagName()) return false; + result = result && (hasDagName() == other.hasDagName()); if (hasDagName()) { - if (!getDagName() - .equals(other.getDagName())) return false; + result = result && getDagName() + .equals(other.getDagName()); } - if (hasVertexName() != other.hasVertexName()) return false; + result = result && (hasVertexName() == other.hasVertexName()); if (hasVertexName()) { - if (!getVertexName() - .equals(other.getVertexName())) return false; + result = result && getVertexName() + .equals(other.getVertexName()); } - if (hasVertexIndex() != other.hasVertexIndex()) return false; + result = result && (hasVertexIndex() == other.hasVertexIndex()); if (hasVertexIndex()) { - if (getVertexIndex() - != other.getVertexIndex()) return false; + result = result && (getVertexIndex() + == other.getVertexIndex()); } - if (hasTokenIdentifier() != other.hasTokenIdentifier()) return false; + result = result && (hasTokenIdentifier() == other.hasTokenIdentifier()); if (hasTokenIdentifier()) { - if (!getTokenIdentifier() - .equals(other.getTokenIdentifier())) return false; + result = result && getTokenIdentifier() + .equals(other.getTokenIdentifier()); } - if (hasProcessorDescriptor() != other.hasProcessorDescriptor()) return false; + result = result && (hasProcessorDescriptor() == 
other.hasProcessorDescriptor()); if (hasProcessorDescriptor()) { - if (!getProcessorDescriptor() - .equals(other.getProcessorDescriptor())) return false; - } - if (!getInputSpecsList() - .equals(other.getInputSpecsList())) return false; - if (!getOutputSpecsList() - .equals(other.getOutputSpecsList())) return false; - if (!getGroupedInputSpecsList() - .equals(other.getGroupedInputSpecsList())) return false; - if (hasVertexParallelism() != other.hasVertexParallelism()) return false; + result = result && getProcessorDescriptor() + .equals(other.getProcessorDescriptor()); + } + result = result && getInputSpecsList() + .equals(other.getInputSpecsList()); + result = result && getOutputSpecsList() + .equals(other.getOutputSpecsList()); + result = result && getGroupedInputSpecsList() + .equals(other.getGroupedInputSpecsList()); + result = result && (hasVertexParallelism() == other.hasVertexParallelism()); if (hasVertexParallelism()) { - if (getVertexParallelism() - != other.getVertexParallelism()) return false; + result = result && (getVertexParallelism() + == other.getVertexParallelism()); } - if (hasIsExternalSubmission() != other.hasIsExternalSubmission()) return false; + result = result && (hasIsExternalSubmission() == other.hasIsExternalSubmission()); if (hasIsExternalSubmission()) { - if (getIsExternalSubmission() - != other.getIsExternalSubmission()) return false; + result = result && (getIsExternalSubmission() + == other.getIsExternalSubmission()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasUser()) { hash = (37 * hash) + USER_FIELD_NUMBER; hash = (53 * hash) + getUser().hashCode(); } if (hasSignatureKeyId()) { hash = (37 * hash) + SIGNATUREKEYID_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getSignatureKeyId()); + hash = (53 * hash) + hashLong(getSignatureKeyId()); } if (hasQueryIdentifier()) { hash = (37 * hash) + QUERY_IDENTIFIER_FIELD_NUMBER; @@ -4959,25 +4413,13 @@ public int hashCode() { } if (hasIsExternalSubmission()) { hash = (37 * hash) + IS_EXTERNAL_SUBMISSION_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( - getIsExternalSubmission()); + hash = (53 * hash) + hashBoolean(getIsExternalSubmission()); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -5001,80 +4443,65 @@ public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Si } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** + * Protobuf type {@code SignableVertexSpec} + * *
      * The part of SubmitWork that can be signed 
      * 
- * - * Protobuf type {@code SignableVertexSpec} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:SignableVertexSpec) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpecOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpecOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SignableVertexSpec_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SignableVertexSpec_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -5087,13 +4514,12 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getQueryIdentifierFieldBuilder(); getProcessorDescriptorFieldBuilder(); getInputSpecsFieldBuilder(); @@ -5101,7 +4527,10 @@ private void maybeForceBuilderInitialization() { getGroupedInputSpecsFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); user_ = ""; @@ -5109,7 +4538,7 @@ public Builder clear() { signatureKeyId_ = 0L; bitField0_ = (bitField0_ & ~0x00000002); if (queryIdentifierBuilder_ == null) { - queryIdentifier_ = null; + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); } else { queryIdentifierBuilder_.clear(); } @@ -5125,7 +4554,7 @@ public Builder clear() { tokenIdentifier_ = ""; bitField0_ = (bitField0_ & ~0x00000080); if (processorDescriptorBuilder_ == null) { - processorDescriptor_ = null; + processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance(); } else { processorDescriptorBuilder_.clear(); } @@ -5155,18 +4584,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SignableVertexSpec_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec result = buildPartial(); if (!result.isInitialized()) { @@ -5175,57 +4605,56 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableV return result; } - @java.lang.Override 
public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.user_ = user_; - if (((from_bitField0_ & 0x00000002) != 0)) { - result.signatureKeyId_ = signatureKeyId_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - if (((from_bitField0_ & 0x00000004) != 0)) { - if (queryIdentifierBuilder_ == null) { - result.queryIdentifier_ = queryIdentifier_; - } else { - result.queryIdentifier_ = queryIdentifierBuilder_.build(); - } + result.signatureKeyId_ = signatureKeyId_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } - if (((from_bitField0_ & 0x00000008) != 0)) { + if (queryIdentifierBuilder_ == null) { + result.queryIdentifier_ = queryIdentifier_; + } else { + result.queryIdentifier_ = queryIdentifierBuilder_.build(); + } + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.hiveQueryId_ = hiveQueryId_; - if (((from_bitField0_ & 0x00000010) != 0)) { + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.dagName_ = dagName_; - if (((from_bitField0_ & 0x00000020) != 0)) { + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.vertexName_ = vertexName_; - if (((from_bitField0_ & 0x00000040) != 0)) { - result.vertexIndex_ = vertexIndex_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { to_bitField0_ |= 0x00000040; } - if (((from_bitField0_ & 0x00000080) != 0)) { + result.vertexIndex_ = vertexIndex_; + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { to_bitField0_ |= 0x00000080; } result.tokenIdentifier_ = tokenIdentifier_; - if (((from_bitField0_ & 0x00000100) != 0)) { - if (processorDescriptorBuilder_ == null) { - result.processorDescriptor_ = processorDescriptor_; - } else { - result.processorDescriptor_ = processorDescriptorBuilder_.build(); - } + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { to_bitField0_ |= 0x00000100; } + if (processorDescriptorBuilder_ == null) { + result.processorDescriptor_ = processorDescriptor_; + } else { + result.processorDescriptor_ = processorDescriptorBuilder_.build(); + } if (inputSpecsBuilder_ == null) { - if (((bitField0_ & 0x00000200) != 0)) { + if (((bitField0_ & 0x00000200) == 0x00000200)) { inputSpecs_ = java.util.Collections.unmodifiableList(inputSpecs_); bitField0_ = (bitField0_ & ~0x00000200); } @@ -5234,7 +4663,7 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableV result.inputSpecs_ = inputSpecsBuilder_.build(); } if (outputSpecsBuilder_ == null) { - if (((bitField0_ & 0x00000400) != 0)) { + if (((bitField0_ & 0x00000400) == 0x00000400)) { outputSpecs_ = java.util.Collections.unmodifiableList(outputSpecs_); bitField0_ = (bitField0_ & ~0x00000400); } @@ -5243,7 +4672,7 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableV result.outputSpecs_ = outputSpecsBuilder_.build(); } if (groupedInputSpecsBuilder_ == null) { - if (((bitField0_ & 0x00000800) != 0)) { + if (((bitField0_ & 0x00000800) == 0x00000800)) { groupedInputSpecs_ = 
java.util.Collections.unmodifiableList(groupedInputSpecs_); bitField0_ = (bitField0_ & ~0x00000800); } @@ -5251,52 +4680,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableV } else { result.groupedInputSpecs_ = groupedInputSpecsBuilder_.build(); } - if (((from_bitField0_ & 0x00001000) != 0)) { - result.vertexParallelism_ = vertexParallelism_; + if (((from_bitField0_ & 0x00001000) == 0x00001000)) { to_bitField0_ |= 0x00000200; } - if (((from_bitField0_ & 0x00002000) != 0)) { - result.isExternalSubmission_ = isExternalSubmission_; + result.vertexParallelism_ = vertexParallelism_; + if (((from_bitField0_ & 0x00002000) == 0x00002000)) { to_bitField0_ |= 0x00000400; } + result.isExternalSubmission_ = isExternalSubmission_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec)other); @@ -5364,7 +4760,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc inputSpecs_ = other.inputSpecs_; bitField0_ = (bitField0_ & ~0x00000200); inputSpecsBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getInputSpecsFieldBuilder() : null; } else { inputSpecsBuilder_.addAllMessages(other.inputSpecs_); @@ -5390,7 +4786,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc outputSpecs_ = other.outputSpecs_; bitField0_ = (bitField0_ & ~0x00000400); outputSpecsBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getOutputSpecsFieldBuilder() : null; } else { outputSpecsBuilder_.addAllMessages(other.outputSpecs_); @@ -5416,7 +4812,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc groupedInputSpecs_ = other.groupedInputSpecs_; bitField0_ = (bitField0_ & ~0x00000800); groupedInputSpecsBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
getGroupedInputSpecsFieldBuilder() : null; } else { groupedInputSpecsBuilder_.addAllMessages(other.groupedInputSpecs_); @@ -5429,17 +4825,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasIsExternalSubmission()) { setIsExternalSubmission(other.getIsExternalSubmission()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -5449,7 +4842,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -5459,27 +4852,23 @@ public Builder mergeFrom( } private int bitField0_; + // optional string user = 1; private java.lang.Object user_ = ""; /** * optional string user = 1; - * @return Whether the user field is set. */ public boolean hasUser() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string user = 1; - * @return The user. */ public java.lang.String getUser() { java.lang.Object ref = user_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - user_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + user_ = s; return s; } else { return (java.lang.String) ref; @@ -5487,7 +4876,6 @@ public java.lang.String getUser() { } /** * optional string user = 1; - * @return The bytes for user. */ public com.google.protobuf.ByteString getUserBytes() { @@ -5504,8 +4892,6 @@ public java.lang.String getUser() { } /** * optional string user = 1; - * @param value The user to set. - * @return This builder for chaining. */ public Builder setUser( java.lang.String value) { @@ -5519,7 +4905,6 @@ public Builder setUser( } /** * optional string user = 1; - * @return This builder for chaining. */ public Builder clearUser() { bitField0_ = (bitField0_ & ~0x00000001); @@ -5529,8 +4914,6 @@ public Builder clearUser() { } /** * optional string user = 1; - * @param value The bytes for user to set. - * @return This builder for chaining. */ public Builder setUserBytes( com.google.protobuf.ByteString value) { @@ -5543,27 +4926,22 @@ public Builder setUserBytes( return this; } + // optional int64 signatureKeyId = 2; private long signatureKeyId_ ; /** * optional int64 signatureKeyId = 2; - * @return Whether the signatureKeyId field is set. */ - @java.lang.Override public boolean hasSignatureKeyId() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional int64 signatureKeyId = 2; - * @return The signatureKeyId. */ - @java.lang.Override public long getSignatureKeyId() { return signatureKeyId_; } /** * optional int64 signatureKeyId = 2; - * @param value The signatureKeyId to set. - * @return This builder for chaining. 
*/ public Builder setSignatureKeyId(long value) { bitField0_ |= 0x00000002; @@ -5573,7 +4951,6 @@ public Builder setSignatureKeyId(long value) { } /** * optional int64 signatureKeyId = 2; - * @return This builder for chaining. */ public Builder clearSignatureKeyId() { bitField0_ = (bitField0_ & ~0x00000002); @@ -5582,23 +4959,22 @@ public Builder clearSignatureKeyId() { return this; } - private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_; - private com.google.protobuf.SingleFieldBuilderV3< + // optional .QueryIdentifierProto query_identifier = 3; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> queryIdentifierBuilder_; /** * optional .QueryIdentifierProto query_identifier = 3; - * @return Whether the queryIdentifier field is set. */ public boolean hasQueryIdentifier() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .QueryIdentifierProto query_identifier = 3; - * @return The queryIdentifier. */ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() { if (queryIdentifierBuilder_ == null) { - return queryIdentifier_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } else { return queryIdentifierBuilder_.getMessage(); } @@ -5638,8 +5014,7 @@ public Builder setQueryIdentifier( */ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) { if (queryIdentifierBuilder_ == null) { - if (((bitField0_ & 0x00000004) != 0) && - queryIdentifier_ != null && + if (((bitField0_ & 0x00000004) == 0x00000004) && queryIdentifier_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance()) { queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder(queryIdentifier_).mergeFrom(value).buildPartial(); @@ -5658,7 +5033,7 @@ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapD */ public Builder clearQueryIdentifier() { if (queryIdentifierBuilder_ == null) { - queryIdentifier_ = null; + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); onChanged(); } else { queryIdentifierBuilder_.clear(); @@ -5681,20 +5056,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIden if (queryIdentifierBuilder_ != null) { return queryIdentifierBuilder_.getMessageOrBuilder(); } else { - return queryIdentifier_ == null ? 
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } } /** * optional .QueryIdentifierProto query_identifier = 3; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> getQueryIdentifierFieldBuilder() { if (queryIdentifierBuilder_ == null) { - queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>( - getQueryIdentifier(), + queryIdentifier_, getParentForChildren(), isClean()); queryIdentifier_ = null; @@ -5702,27 +5076,23 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIden return queryIdentifierBuilder_; } + // optional string hive_query_id = 4; private java.lang.Object hiveQueryId_ = ""; /** * optional string hive_query_id = 4; - * @return Whether the hiveQueryId field is set. */ public boolean hasHiveQueryId() { - return ((bitField0_ & 0x00000008) != 0); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional string hive_query_id = 4; - * @return The hiveQueryId. */ public java.lang.String getHiveQueryId() { java.lang.Object ref = hiveQueryId_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - hiveQueryId_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + hiveQueryId_ = s; return s; } else { return (java.lang.String) ref; @@ -5730,7 +5100,6 @@ public java.lang.String getHiveQueryId() { } /** * optional string hive_query_id = 4; - * @return The bytes for hiveQueryId. */ public com.google.protobuf.ByteString getHiveQueryIdBytes() { @@ -5747,8 +5116,6 @@ public java.lang.String getHiveQueryId() { } /** * optional string hive_query_id = 4; - * @param value The hiveQueryId to set. - * @return This builder for chaining. */ public Builder setHiveQueryId( java.lang.String value) { @@ -5762,7 +5129,6 @@ public Builder setHiveQueryId( } /** * optional string hive_query_id = 4; - * @return This builder for chaining. */ public Builder clearHiveQueryId() { bitField0_ = (bitField0_ & ~0x00000008); @@ -5772,8 +5138,6 @@ public Builder clearHiveQueryId() { } /** * optional string hive_query_id = 4; - * @param value The bytes for hiveQueryId to set. - * @return This builder for chaining. */ public Builder setHiveQueryIdBytes( com.google.protobuf.ByteString value) { @@ -5786,47 +5150,42 @@ public Builder setHiveQueryIdBytes( return this; } + // optional string dag_name = 5; private java.lang.Object dagName_ = ""; /** + * optional string dag_name = 5; + * *
        * Display names cannot be modified by the client for now. If needed, they should be sent to HS2 who will put them here.
        * 
-       *
-       * optional string dag_name = 5;
-       * @return Whether the dagName field is set.
        */
       public boolean hasDagName() {
-        return ((bitField0_ & 0x00000010) != 0);
+        return ((bitField0_ & 0x00000010) == 0x00000010);
       }
       /**
+       * optional string dag_name = 5;
+       *
        *
        * Display names cannot be modified by the client for now. If needed, they should be sent to HS2 who will put them here.
        * 
- * - * optional string dag_name = 5; - * @return The dagName. */ public java.lang.String getDagName() { java.lang.Object ref = dagName_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - dagName_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + dagName_ = s; return s; } else { return (java.lang.String) ref; } } /** + * optional string dag_name = 5; + * *
        * Display names cannot be modified by the client for now. If needed, they should be sent to HS2 who will put them here.
        * 
- * - * optional string dag_name = 5; - * @return The bytes for dagName. */ public com.google.protobuf.ByteString getDagNameBytes() { @@ -5842,13 +5201,11 @@ public java.lang.String getDagName() { } } /** + * optional string dag_name = 5; + * *
        * Display names cannot be modified by the client for now. If needed, they should be sent to HS2 who will put them here.
        * 
- * - * optional string dag_name = 5; - * @param value The dagName to set. - * @return This builder for chaining. */ public Builder setDagName( java.lang.String value) { @@ -5861,12 +5218,11 @@ public Builder setDagName( return this; } /** + * optional string dag_name = 5; + * *
        * Display names cannot be modified by the client for now. If needed, they should be sent to HS2 who will put them here.
        * 
- * - * optional string dag_name = 5; - * @return This builder for chaining. */ public Builder clearDagName() { bitField0_ = (bitField0_ & ~0x00000010); @@ -5875,13 +5231,11 @@ public Builder clearDagName() { return this; } /** + * optional string dag_name = 5; + * *
        * Display names cannot be modified by the client for now. If needed, they should be sent to HS2 who will put them here.
        * 
- * - * optional string dag_name = 5; - * @param value The bytes for dagName to set. - * @return This builder for chaining. */ public Builder setDagNameBytes( com.google.protobuf.ByteString value) { @@ -5894,27 +5248,23 @@ public Builder setDagNameBytes( return this; } + // optional string vertex_name = 6; private java.lang.Object vertexName_ = ""; /** * optional string vertex_name = 6; - * @return Whether the vertexName field is set. */ public boolean hasVertexName() { - return ((bitField0_ & 0x00000020) != 0); + return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional string vertex_name = 6; - * @return The vertexName. */ public java.lang.String getVertexName() { java.lang.Object ref = vertexName_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - vertexName_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + vertexName_ = s; return s; } else { return (java.lang.String) ref; @@ -5922,7 +5272,6 @@ public java.lang.String getVertexName() { } /** * optional string vertex_name = 6; - * @return The bytes for vertexName. */ public com.google.protobuf.ByteString getVertexNameBytes() { @@ -5939,8 +5288,6 @@ public java.lang.String getVertexName() { } /** * optional string vertex_name = 6; - * @param value The vertexName to set. - * @return This builder for chaining. */ public Builder setVertexName( java.lang.String value) { @@ -5954,7 +5301,6 @@ public Builder setVertexName( } /** * optional string vertex_name = 6; - * @return This builder for chaining. */ public Builder clearVertexName() { bitField0_ = (bitField0_ & ~0x00000020); @@ -5964,8 +5310,6 @@ public Builder clearVertexName() { } /** * optional string vertex_name = 6; - * @param value The bytes for vertexName to set. - * @return This builder for chaining. */ public Builder setVertexNameBytes( com.google.protobuf.ByteString value) { @@ -5978,27 +5322,22 @@ public Builder setVertexNameBytes( return this; } + // optional int32 vertex_index = 7; private int vertexIndex_ ; /** * optional int32 vertex_index = 7; - * @return Whether the vertexIndex field is set. */ - @java.lang.Override public boolean hasVertexIndex() { - return ((bitField0_ & 0x00000040) != 0); + return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional int32 vertex_index = 7; - * @return The vertexIndex. */ - @java.lang.Override public int getVertexIndex() { return vertexIndex_; } /** * optional int32 vertex_index = 7; - * @param value The vertexIndex to set. - * @return This builder for chaining. */ public Builder setVertexIndex(int value) { bitField0_ |= 0x00000040; @@ -6008,7 +5347,6 @@ public Builder setVertexIndex(int value) { } /** * optional int32 vertex_index = 7; - * @return This builder for chaining. */ public Builder clearVertexIndex() { bitField0_ = (bitField0_ & ~0x00000040); @@ -6017,47 +5355,42 @@ public Builder clearVertexIndex() { return this; } + // optional string token_identifier = 8; private java.lang.Object tokenIdentifier_ = ""; /** + * optional string token_identifier = 8; + * *
        * The core vertex stuff 
        * 
-       *
-       * optional string token_identifier = 8;
-       * @return Whether the tokenIdentifier field is set.
        */
       public boolean hasTokenIdentifier() {
-        return ((bitField0_ & 0x00000080) != 0);
+        return ((bitField0_ & 0x00000080) == 0x00000080);
       }
       /**
+       * optional string token_identifier = 8;
+       *
        *
        * The core vertex stuff 
        * 
- * - * optional string token_identifier = 8; - * @return The tokenIdentifier. */ public java.lang.String getTokenIdentifier() { java.lang.Object ref = tokenIdentifier_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - tokenIdentifier_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + tokenIdentifier_ = s; return s; } else { return (java.lang.String) ref; } } /** + * optional string token_identifier = 8; + * *
        * The core vertex stuff 
        * 
- * - * optional string token_identifier = 8; - * @return The bytes for tokenIdentifier. */ public com.google.protobuf.ByteString getTokenIdentifierBytes() { @@ -6073,13 +5406,11 @@ public java.lang.String getTokenIdentifier() { } } /** + * optional string token_identifier = 8; + * *
        * The core vertex stuff 
        * 
- * - * optional string token_identifier = 8; - * @param value The tokenIdentifier to set. - * @return This builder for chaining. */ public Builder setTokenIdentifier( java.lang.String value) { @@ -6092,12 +5423,11 @@ public Builder setTokenIdentifier( return this; } /** + * optional string token_identifier = 8; + * *
        * The core vertex stuff 
        * 
- * - * optional string token_identifier = 8; - * @return This builder for chaining. */ public Builder clearTokenIdentifier() { bitField0_ = (bitField0_ & ~0x00000080); @@ -6106,13 +5436,11 @@ public Builder clearTokenIdentifier() { return this; } /** + * optional string token_identifier = 8; + * *
        * The core vertex stuff 
        * 
- * - * optional string token_identifier = 8; - * @param value The bytes for tokenIdentifier to set. - * @return This builder for chaining. */ public Builder setTokenIdentifierBytes( com.google.protobuf.ByteString value) { @@ -6125,23 +5453,22 @@ public Builder setTokenIdentifierBytes( return this; } - private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto processorDescriptor_; - private com.google.protobuf.SingleFieldBuilderV3< + // optional .EntityDescriptorProto processor_descriptor = 9; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder> processorDescriptorBuilder_; /** * optional .EntityDescriptorProto processor_descriptor = 9; - * @return Whether the processorDescriptor field is set. */ public boolean hasProcessorDescriptor() { - return ((bitField0_ & 0x00000100) != 0); + return ((bitField0_ & 0x00000100) == 0x00000100); } /** * optional .EntityDescriptorProto processor_descriptor = 9; - * @return The processorDescriptor. */ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto getProcessorDescriptor() { if (processorDescriptorBuilder_ == null) { - return processorDescriptor_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance() : processorDescriptor_; + return processorDescriptor_; } else { return processorDescriptorBuilder_.getMessage(); } @@ -6181,8 +5508,7 @@ public Builder setProcessorDescriptor( */ public Builder mergeProcessorDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto value) { if (processorDescriptorBuilder_ == null) { - if (((bitField0_ & 0x00000100) != 0) && - processorDescriptor_ != null && + if (((bitField0_ & 0x00000100) == 0x00000100) && processorDescriptor_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance()) { processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.newBuilder(processorDescriptor_).mergeFrom(value).buildPartial(); @@ -6201,7 +5527,7 @@ public Builder mergeProcessorDescriptor(org.apache.hadoop.hive.llap.daemon.rpc.L */ public Builder clearProcessorDescriptor() { if (processorDescriptorBuilder_ == null) { - processorDescriptor_ = null; + processorDescriptor_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance(); onChanged(); } else { processorDescriptorBuilder_.clear(); @@ -6224,20 +5550,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDes if (processorDescriptorBuilder_ != null) { return processorDescriptorBuilder_.getMessageOrBuilder(); } else { - return processorDescriptor_ == null ? 
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.getDefaultInstance() : processorDescriptor_; + return processorDescriptor_; } } /** * optional .EntityDescriptorProto processor_descriptor = 9; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder> getProcessorDescriptorFieldBuilder() { if (processorDescriptorBuilder_ == null) { - processorDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + processorDescriptorBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDescriptorProtoOrBuilder>( - getProcessorDescriptor(), + processorDescriptor_, getParentForChildren(), isClean()); processorDescriptor_ = null; @@ -6245,16 +5570,17 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EntityDes return processorDescriptorBuilder_; } + // repeated .IOSpecProto input_specs = 10; private java.util.List inputSpecs_ = java.util.Collections.emptyList(); private void ensureInputSpecsIsMutable() { - if (!((bitField0_ & 0x00000200) != 0)) { + if (!((bitField0_ & 0x00000200) == 0x00000200)) { inputSpecs_ = new java.util.ArrayList(inputSpecs_); bitField0_ |= 0x00000200; } } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder> inputSpecsBuilder_; /** @@ -6386,8 +5712,7 @@ public Builder addAllInputSpecs( java.lang.Iterable values) { if (inputSpecsBuilder_ == null) { ensureInputSpecsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, inputSpecs_); + super.addAll(values, inputSpecs_); onChanged(); } else { inputSpecsBuilder_.addAllMessages(values); @@ -6470,14 +5795,14 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecPro getInputSpecsBuilderList() { return getInputSpecsFieldBuilder().getBuilderList(); } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder> getInputSpecsFieldBuilder() { if (inputSpecsBuilder_ == null) { - inputSpecsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + inputSpecsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>( inputSpecs_, - ((bitField0_ & 0x00000200) != 0), + ((bitField0_ & 0x00000200) == 0x00000200), getParentForChildren(), 
isClean()); inputSpecs_ = null; @@ -6485,16 +5810,17 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecPro return inputSpecsBuilder_; } + // repeated .IOSpecProto output_specs = 11; private java.util.List outputSpecs_ = java.util.Collections.emptyList(); private void ensureOutputSpecsIsMutable() { - if (!((bitField0_ & 0x00000400) != 0)) { + if (!((bitField0_ & 0x00000400) == 0x00000400)) { outputSpecs_ = new java.util.ArrayList(outputSpecs_); bitField0_ |= 0x00000400; } } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder> outputSpecsBuilder_; /** @@ -6626,8 +5952,7 @@ public Builder addAllOutputSpecs( java.lang.Iterable values) { if (outputSpecsBuilder_ == null) { ensureOutputSpecsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, outputSpecs_); + super.addAll(values, outputSpecs_); onChanged(); } else { outputSpecsBuilder_.addAllMessages(values); @@ -6710,14 +6035,14 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecPro getOutputSpecsBuilderList() { return getOutputSpecsFieldBuilder().getBuilderList(); } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder> getOutputSpecsFieldBuilder() { if (outputSpecsBuilder_ == null) { - outputSpecsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + outputSpecsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecProtoOrBuilder>( outputSpecs_, - ((bitField0_ & 0x00000400) != 0), + ((bitField0_ & 0x00000400) == 0x00000400), getParentForChildren(), isClean()); outputSpecs_ = null; @@ -6725,16 +6050,17 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.IOSpecPro return outputSpecsBuilder_; } + // repeated .GroupInputSpecProto grouped_input_specs = 12; private java.util.List groupedInputSpecs_ = java.util.Collections.emptyList(); private void ensureGroupedInputSpecsIsMutable() { - if (!((bitField0_ & 0x00000800) != 0)) { + if (!((bitField0_ & 0x00000800) == 0x00000800)) { groupedInputSpecs_ = new java.util.ArrayList(groupedInputSpecs_); bitField0_ |= 0x00000800; } } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder> groupedInputSpecsBuilder_; /** @@ -6866,8 +6192,7 @@ public Builder addAllGroupedInputSpecs( java.lang.Iterable values) { if (groupedInputSpecsBuilder_ == null) { ensureGroupedInputSpecsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, 
groupedInputSpecs_); + super.addAll(values, groupedInputSpecs_); onChanged(); } else { groupedInputSpecsBuilder_.addAllMessages(values); @@ -6950,14 +6275,14 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInpu getGroupedInputSpecsBuilderList() { return getGroupedInputSpecsFieldBuilder().getBuilderList(); } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder> getGroupedInputSpecsFieldBuilder() { if (groupedInputSpecsBuilder_ == null) { - groupedInputSpecsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + groupedInputSpecsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInputSpecProtoOrBuilder>( groupedInputSpecs_, - ((bitField0_ & 0x00000800) != 0), + ((bitField0_ & 0x00000800) == 0x00000800), getParentForChildren(), isClean()); groupedInputSpecs_ = null; @@ -6965,39 +6290,34 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GroupInpu return groupedInputSpecsBuilder_; } + // optional int32 vertex_parallelism = 13; private int vertexParallelism_ ; /** + * optional int32 vertex_parallelism = 13; + * *
        * An internal field required for Tez.
        * 
-       *
-       * optional int32 vertex_parallelism = 13;
-       * @return Whether the vertexParallelism field is set.
        */
-      @java.lang.Override
       public boolean hasVertexParallelism() {
-        return ((bitField0_ & 0x00001000) != 0);
+        return ((bitField0_ & 0x00001000) == 0x00001000);
       }
       /**
+       * optional int32 vertex_parallelism = 13;
+       *
        *
        * An internal field required for Tez.
        * 
-       *
-       * optional int32 vertex_parallelism = 13;
-       * @return The vertexParallelism.
        */
-      @java.lang.Override
       public int getVertexParallelism() {
         return vertexParallelism_;
       }
       /**
+       * optional int32 vertex_parallelism = 13;
+       *
        *
        * An internal field required for Tez.
        * 
-       *
-       * optional int32 vertex_parallelism = 13;
-       * @param value The vertexParallelism to set.
-       * @return This builder for chaining.
        */
       public Builder setVertexParallelism(int value) {
         bitField0_ |= 0x00001000;
@@ -7006,12 +6326,11 @@ public Builder setVertexParallelism(int value) {
         return this;
       }
       /**
+       * optional int32 vertex_parallelism = 13;
+       *
        *
        * An internal field required for Tez.
        * 
- * - * optional int32 vertex_parallelism = 13; - * @return This builder for chaining. */ public Builder clearVertexParallelism() { bitField0_ = (bitField0_ & ~0x00001000); @@ -7020,27 +6339,22 @@ public Builder clearVertexParallelism() { return this; } + // optional bool is_external_submission = 14 [default = false]; private boolean isExternalSubmission_ ; /** * optional bool is_external_submission = 14 [default = false]; - * @return Whether the isExternalSubmission field is set. */ - @java.lang.Override public boolean hasIsExternalSubmission() { - return ((bitField0_ & 0x00002000) != 0); + return ((bitField0_ & 0x00002000) == 0x00002000); } /** * optional bool is_external_submission = 14 [default = false]; - * @return The isExternalSubmission. */ - @java.lang.Override public boolean getIsExternalSubmission() { return isExternalSubmission_; } /** * optional bool is_external_submission = 14 [default = false]; - * @param value The isExternalSubmission to set. - * @return This builder for chaining. */ public Builder setIsExternalSubmission(boolean value) { bitField0_ |= 0x00002000; @@ -7050,7 +6364,6 @@ public Builder setIsExternalSubmission(boolean value) { } /** * optional bool is_external_submission = 14 [default = false]; - * @return This builder for chaining. */ public Builder clearIsExternalSubmission() { bitField0_ = (bitField0_ & ~0x00002000); @@ -7058,71 +6371,28 @@ public Builder clearIsExternalSubmission() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:SignableVertexSpec) } - // @@protoc_insertion_point(class_scope:SignableVertexSpec) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public SignableVertexSpec parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SignableVertexSpec(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new SignableVertexSpec(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:SignableVertexSpec) } - public interface VertexOrBinaryOrBuilder extends - // @@protoc_insertion_point(interface_extends:VertexOrBinary) - com.google.protobuf.MessageOrBuilder { + public interface VertexOrBinaryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional 
.SignableVertexSpec vertex = 1; /** * optional .SignableVertexSpec vertex = 1; - * @return Whether the vertex field is set. */ boolean hasVertex(); /** * optional .SignableVertexSpec vertex = 1; - * @return The vertex. */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec getVertex(); /** @@ -7130,65 +6400,61 @@ public interface VertexOrBinaryOrBuilder extends */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpecOrBuilder getVertexOrBuilder(); + // optional bytes vertexBinary = 2; /** + * optional bytes vertexBinary = 2; + * *
      * SignableVertexSpec
      * 
-     *
-     * optional bytes vertexBinary = 2;
-     * @return Whether the vertexBinary field is set.
      */
     boolean hasVertexBinary();
     /**
+     * optional bytes vertexBinary = 2;
+     *
      *
      * SignableVertexSpec
      * 
-     *
-     * optional bytes vertexBinary = 2;
-     * @return The vertexBinary.
      */
     com.google.protobuf.ByteString getVertexBinary();
   }
   /**
+   * Protobuf type {@code VertexOrBinary}
+   *
    *
    * Union
    * 
- * - * Protobuf type {@code VertexOrBinary} */ public static final class VertexOrBinary extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:VertexOrBinary) - VertexOrBinaryOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements VertexOrBinaryOrBuilder { // Use VertexOrBinary.newBuilder() to construct. - private VertexOrBinary(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private VertexOrBinary(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private VertexOrBinary() { - vertexBinary_ = com.google.protobuf.ByteString.EMPTY; + private VertexOrBinary(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final VertexOrBinary defaultInstance; + public static VertexOrBinary getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new VertexOrBinary(); + public VertexOrBinary getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private VertexOrBinary( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -7200,9 +6466,16 @@ private VertexOrBinary( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = vertex_.toBuilder(); } vertex_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.PARSER, extensionRegistry); @@ -7218,22 +6491,13 @@ private VertexOrBinary( vertexBinary_ = input.readBytes(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -7244,110 +6508,126 @@ private VertexOrBinary( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_VertexOrBinary_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { 
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_VertexOrBinary_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public VertexOrBinary parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new VertexOrBinary(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional .SignableVertexSpec vertex = 1; public static final int VERTEX_FIELD_NUMBER = 1; private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec vertex_; /** * optional .SignableVertexSpec vertex = 1; - * @return Whether the vertex field is set. */ - @java.lang.Override public boolean hasVertex() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .SignableVertexSpec vertex = 1; - * @return The vertex. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec getVertex() { - return vertex_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.getDefaultInstance() : vertex_; + return vertex_; } /** * optional .SignableVertexSpec vertex = 1; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpecOrBuilder getVertexOrBuilder() { - return vertex_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.getDefaultInstance() : vertex_; + return vertex_; } + // optional bytes vertexBinary = 2; public static final int VERTEXBINARY_FIELD_NUMBER = 2; private com.google.protobuf.ByteString vertexBinary_; /** + * optional bytes vertexBinary = 2; + * *
      * SignableVertexSpec
      * 
- * - * optional bytes vertexBinary = 2; - * @return Whether the vertexBinary field is set. */ - @java.lang.Override public boolean hasVertexBinary() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** + * optional bytes vertexBinary = 2; + * *
      * SignableVertexSpec
      * 
- * - * optional bytes vertexBinary = 2; - * @return The vertexBinary. */ - @java.lang.Override public com.google.protobuf.ByteString getVertexBinary() { return vertexBinary_; } + private void initFields() { + vertex_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.getDefaultInstance(); + vertexBinary_ = com.google.protobuf.ByteString.EMPTY; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - output.writeMessage(1, getVertex()); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, vertex_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, vertexBinary_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, getVertex()); + .computeMessageSize(1, vertex_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, vertexBinary_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -7358,27 +6638,30 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary) obj; - if (hasVertex() != other.hasVertex()) return false; + boolean result = true; + result = result && (hasVertex() == other.hasVertex()); if (hasVertex()) { - if (!getVertex() - .equals(other.getVertex())) return false; + result = result && getVertex() + .equals(other.getVertex()); } - if (hasVertexBinary() != other.hasVertexBinary()) return false; + result = result && (hasVertexBinary() == other.hasVertexBinary()); if (hasVertexBinary()) { - if (!getVertexBinary() - .equals(other.getVertexBinary())) return false; + result = result && getVertexBinary() + .equals(other.getVertexBinary()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if 
(hasVertex()) { hash = (37 * hash) + VERTEX_FIELD_NUMBER; hash = (53 * hash) + getVertex().hashCode(); @@ -7387,22 +6670,11 @@ public int hashCode() { hash = (37 * hash) + VERTEXBINARY_FIELD_NUMBER; hash = (53 * hash) + getVertexBinary().hashCode(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -7426,80 +6698,65 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Ve } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return 
DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** + * Protobuf type {@code VertexOrBinary} + * *
      * <pre>
      * Union
      * </pre>
- * - * Protobuf type {@code VertexOrBinary} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:VertexOrBinary) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinaryOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinaryOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_VertexOrBinary_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_VertexOrBinary_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -7512,21 +6769,23 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getVertexFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); if (vertexBuilder_ == null) { - vertex_ = null; + vertex_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.getDefaultInstance(); } else { vertexBuilder_.clear(); } @@ -7536,18 +6795,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_VertexOrBinary_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary result = buildPartial(); if (!result.isInitialized()) { @@ -7556,20 +6816,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrB return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { - if (vertexBuilder_ == null) { - result.vertex_ = vertex_; - } else { - result.vertex_ = vertexBuilder_.build(); - } + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (((from_bitField0_ & 0x00000002) != 0)) { + if (vertexBuilder_ == null) { + result.vertex_ = vertex_; + } else { + result.vertex_ = vertexBuilder_.build(); + } + if 
(((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.vertexBinary_ = vertexBinary_; @@ -7578,39 +6837,6 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrB return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary)other); @@ -7628,17 +6854,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasVertexBinary()) { setVertexBinary(other.getVertexBinary()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -7648,7 +6871,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -7658,23 +6881,22 @@ public Builder mergeFrom( } private int bitField0_; - private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec vertex_; - private com.google.protobuf.SingleFieldBuilderV3< + // optional .SignableVertexSpec vertex = 1; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec vertex_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpecOrBuilder> vertexBuilder_; /** * optional .SignableVertexSpec vertex = 1; - * @return Whether the vertex field is set. */ public boolean hasVertex() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .SignableVertexSpec vertex = 1; - * @return The vertex. 
*/ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec getVertex() { if (vertexBuilder_ == null) { - return vertex_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.getDefaultInstance() : vertex_; + return vertex_; } else { return vertexBuilder_.getMessage(); } @@ -7714,8 +6936,7 @@ public Builder setVertex( */ public Builder mergeVertex(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec value) { if (vertexBuilder_ == null) { - if (((bitField0_ & 0x00000001) != 0) && - vertex_ != null && + if (((bitField0_ & 0x00000001) == 0x00000001) && vertex_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.getDefaultInstance()) { vertex_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.newBuilder(vertex_).mergeFrom(value).buildPartial(); @@ -7734,7 +6955,7 @@ public Builder mergeVertex(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProt */ public Builder clearVertex() { if (vertexBuilder_ == null) { - vertex_ = null; + vertex_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.getDefaultInstance(); onChanged(); } else { vertexBuilder_.clear(); @@ -7757,20 +6978,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableV if (vertexBuilder_ != null) { return vertexBuilder_.getMessageOrBuilder(); } else { - return vertex_ == null ? - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.getDefaultInstance() : vertex_; + return vertex_; } } /** * optional .SignableVertexSpec vertex = 1; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpecOrBuilder> getVertexFieldBuilder() { if (vertexBuilder_ == null) { - vertexBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + vertexBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpec.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableVertexSpecOrBuilder>( - getVertex(), + vertex_, getParentForChildren(), isClean()); vertex_ = null; @@ -7778,39 +6998,34 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SignableV return vertexBuilder_; } + // optional bytes vertexBinary = 2; private com.google.protobuf.ByteString vertexBinary_ = com.google.protobuf.ByteString.EMPTY; /** + * optional bytes vertexBinary = 2; + * *
        * <pre>
        * SignableVertexSpec
        * </pre>
- * - * optional bytes vertexBinary = 2; - * @return Whether the vertexBinary field is set. */ - @java.lang.Override public boolean hasVertexBinary() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** + * optional bytes vertexBinary = 2; + * *
        * <pre>
        * SignableVertexSpec
        * </pre>
- * - * optional bytes vertexBinary = 2; - * @return The vertexBinary. */ - @java.lang.Override public com.google.protobuf.ByteString getVertexBinary() { return vertexBinary_; } /** + * optional bytes vertexBinary = 2; + * *
        * <pre>
        * SignableVertexSpec
        * </pre>
- * - * optional bytes vertexBinary = 2; - * @param value The vertexBinary to set. - * @return This builder for chaining. */ public Builder setVertexBinary(com.google.protobuf.ByteString value) { if (value == null) { @@ -7822,12 +7037,11 @@ public Builder setVertexBinary(com.google.protobuf.ByteString value) { return this; } /** + * optional bytes vertexBinary = 2; + * *
        * <pre>
        * SignableVertexSpec
        * </pre>
- * - * optional bytes vertexBinary = 2; - * @return This builder for chaining. */ public Builder clearVertexBinary() { bitField0_ = (bitField0_ & ~0x00000002); @@ -7835,126 +7049,78 @@ public Builder clearVertexBinary() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:VertexOrBinary) } - // @@protoc_insertion_point(class_scope:VertexOrBinary) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public VertexOrBinary parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new VertexOrBinary(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new VertexOrBinary(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:VertexOrBinary) } - public interface FragmentRuntimeInfoOrBuilder extends - // @@protoc_insertion_point(interface_extends:FragmentRuntimeInfo) - com.google.protobuf.MessageOrBuilder { + public interface FragmentRuntimeInfoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional int32 num_self_and_upstream_tasks = 1; /** * optional int32 num_self_and_upstream_tasks = 1; - * @return Whether the numSelfAndUpstreamTasks field is set. */ boolean hasNumSelfAndUpstreamTasks(); /** * optional int32 num_self_and_upstream_tasks = 1; - * @return The numSelfAndUpstreamTasks. */ int getNumSelfAndUpstreamTasks(); + // optional int32 num_self_and_upstream_completed_tasks = 2; /** * optional int32 num_self_and_upstream_completed_tasks = 2; - * @return Whether the numSelfAndUpstreamCompletedTasks field is set. */ boolean hasNumSelfAndUpstreamCompletedTasks(); /** * optional int32 num_self_and_upstream_completed_tasks = 2; - * @return The numSelfAndUpstreamCompletedTasks. */ int getNumSelfAndUpstreamCompletedTasks(); + // optional int32 within_dag_priority = 3; /** * optional int32 within_dag_priority = 3; - * @return Whether the withinDagPriority field is set. */ boolean hasWithinDagPriority(); /** * optional int32 within_dag_priority = 3; - * @return The withinDagPriority. */ int getWithinDagPriority(); + // optional int64 dag_start_time = 4; /** * optional int64 dag_start_time = 4; - * @return Whether the dagStartTime field is set. 
*/ boolean hasDagStartTime(); /** * optional int64 dag_start_time = 4; - * @return The dagStartTime. */ long getDagStartTime(); + // optional int64 first_attempt_start_time = 5; /** * optional int64 first_attempt_start_time = 5; - * @return Whether the firstAttemptStartTime field is set. */ boolean hasFirstAttemptStartTime(); /** * optional int64 first_attempt_start_time = 5; - * @return The firstAttemptStartTime. */ long getFirstAttemptStartTime(); + // optional int64 current_attempt_start_time = 6; /** * optional int64 current_attempt_start_time = 6; - * @return Whether the currentAttemptStartTime field is set. */ boolean hasCurrentAttemptStartTime(); /** * optional int64 current_attempt_start_time = 6; - * @return The currentAttemptStartTime. */ long getCurrentAttemptStartTime(); } @@ -7962,37 +7128,35 @@ public interface FragmentRuntimeInfoOrBuilder extends * Protobuf type {@code FragmentRuntimeInfo} */ public static final class FragmentRuntimeInfo extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:FragmentRuntimeInfo) - FragmentRuntimeInfoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements FragmentRuntimeInfoOrBuilder { // Use FragmentRuntimeInfo.newBuilder() to construct. - private FragmentRuntimeInfo(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private FragmentRuntimeInfo(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private FragmentRuntimeInfo() { + private FragmentRuntimeInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final FragmentRuntimeInfo defaultInstance; + public static FragmentRuntimeInfo getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new FragmentRuntimeInfo(); + public FragmentRuntimeInfo getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private FragmentRuntimeInfo( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -8004,6 +7168,13 @@ private FragmentRuntimeInfo( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 8: { bitField0_ |= 0x00000001; numSelfAndUpstreamTasks_ = input.readInt32(); @@ -8034,22 +7205,13 @@ private FragmentRuntimeInfo( currentAttemptStartTime_ = input.readInt64(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch 
(java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -8060,199 +7222,208 @@ private FragmentRuntimeInfo( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_FragmentRuntimeInfo_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_FragmentRuntimeInfo_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public FragmentRuntimeInfo parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new FragmentRuntimeInfo(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional int32 num_self_and_upstream_tasks = 1; public static final int NUM_SELF_AND_UPSTREAM_TASKS_FIELD_NUMBER = 1; private int numSelfAndUpstreamTasks_; /** * optional int32 num_self_and_upstream_tasks = 1; - * @return Whether the numSelfAndUpstreamTasks field is set. */ - @java.lang.Override public boolean hasNumSelfAndUpstreamTasks() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional int32 num_self_and_upstream_tasks = 1; - * @return The numSelfAndUpstreamTasks. */ - @java.lang.Override public int getNumSelfAndUpstreamTasks() { return numSelfAndUpstreamTasks_; } + // optional int32 num_self_and_upstream_completed_tasks = 2; public static final int NUM_SELF_AND_UPSTREAM_COMPLETED_TASKS_FIELD_NUMBER = 2; private int numSelfAndUpstreamCompletedTasks_; /** * optional int32 num_self_and_upstream_completed_tasks = 2; - * @return Whether the numSelfAndUpstreamCompletedTasks field is set. */ - @java.lang.Override public boolean hasNumSelfAndUpstreamCompletedTasks() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional int32 num_self_and_upstream_completed_tasks = 2; - * @return The numSelfAndUpstreamCompletedTasks. */ - @java.lang.Override public int getNumSelfAndUpstreamCompletedTasks() { return numSelfAndUpstreamCompletedTasks_; } + // optional int32 within_dag_priority = 3; public static final int WITHIN_DAG_PRIORITY_FIELD_NUMBER = 3; private int withinDagPriority_; /** * optional int32 within_dag_priority = 3; - * @return Whether the withinDagPriority field is set. */ - @java.lang.Override public boolean hasWithinDagPriority() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional int32 within_dag_priority = 3; - * @return The withinDagPriority. 
*/ - @java.lang.Override public int getWithinDagPriority() { return withinDagPriority_; } + // optional int64 dag_start_time = 4; public static final int DAG_START_TIME_FIELD_NUMBER = 4; private long dagStartTime_; /** * optional int64 dag_start_time = 4; - * @return Whether the dagStartTime field is set. */ - @java.lang.Override public boolean hasDagStartTime() { - return ((bitField0_ & 0x00000008) != 0); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional int64 dag_start_time = 4; - * @return The dagStartTime. */ - @java.lang.Override public long getDagStartTime() { return dagStartTime_; } + // optional int64 first_attempt_start_time = 5; public static final int FIRST_ATTEMPT_START_TIME_FIELD_NUMBER = 5; private long firstAttemptStartTime_; /** * optional int64 first_attempt_start_time = 5; - * @return Whether the firstAttemptStartTime field is set. */ - @java.lang.Override public boolean hasFirstAttemptStartTime() { - return ((bitField0_ & 0x00000010) != 0); + return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional int64 first_attempt_start_time = 5; - * @return The firstAttemptStartTime. */ - @java.lang.Override public long getFirstAttemptStartTime() { return firstAttemptStartTime_; } + // optional int64 current_attempt_start_time = 6; public static final int CURRENT_ATTEMPT_START_TIME_FIELD_NUMBER = 6; private long currentAttemptStartTime_; /** * optional int64 current_attempt_start_time = 6; - * @return Whether the currentAttemptStartTime field is set. */ - @java.lang.Override public boolean hasCurrentAttemptStartTime() { - return ((bitField0_ & 0x00000020) != 0); + return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional int64 current_attempt_start_time = 6; - * @return The currentAttemptStartTime. */ - @java.lang.Override public long getCurrentAttemptStartTime() { return currentAttemptStartTime_; } + private void initFields() { + numSelfAndUpstreamTasks_ = 0; + numSelfAndUpstreamCompletedTasks_ = 0; + withinDagPriority_ = 0; + dagStartTime_ = 0L; + firstAttemptStartTime_ = 0L; + currentAttemptStartTime_ = 0L; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt32(1, numSelfAndUpstreamTasks_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeInt32(2, numSelfAndUpstreamCompletedTasks_); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeInt32(3, withinDagPriority_); } - if (((bitField0_ & 0x00000008) != 0)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeInt64(4, dagStartTime_); } - if (((bitField0_ & 0x00000010) != 0)) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeInt64(5, firstAttemptStartTime_); } - if (((bitField0_ & 0x00000020) != 0)) { + if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeInt64(6, currentAttemptStartTime_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int 
getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(1, numSelfAndUpstreamTasks_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(2, numSelfAndUpstreamCompletedTasks_); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(3, withinDagPriority_); } - if (((bitField0_ & 0x00000008) != 0)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(4, dagStartTime_); } - if (((bitField0_ & 0x00000010) != 0)) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(5, firstAttemptStartTime_); } - if (((bitField0_ & 0x00000020) != 0)) { + if (((bitField0_ & 0x00000020) == 0x00000020)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(6, currentAttemptStartTime_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -8263,47 +7434,50 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo) obj; - if (hasNumSelfAndUpstreamTasks() != other.hasNumSelfAndUpstreamTasks()) return false; + boolean result = true; + result = result && (hasNumSelfAndUpstreamTasks() == other.hasNumSelfAndUpstreamTasks()); if (hasNumSelfAndUpstreamTasks()) { - if (getNumSelfAndUpstreamTasks() - != other.getNumSelfAndUpstreamTasks()) return false; + result = result && (getNumSelfAndUpstreamTasks() + == other.getNumSelfAndUpstreamTasks()); } - if (hasNumSelfAndUpstreamCompletedTasks() != other.hasNumSelfAndUpstreamCompletedTasks()) return false; + result = result && (hasNumSelfAndUpstreamCompletedTasks() == other.hasNumSelfAndUpstreamCompletedTasks()); if (hasNumSelfAndUpstreamCompletedTasks()) { - if (getNumSelfAndUpstreamCompletedTasks() - != other.getNumSelfAndUpstreamCompletedTasks()) return false; + result = result && (getNumSelfAndUpstreamCompletedTasks() + == other.getNumSelfAndUpstreamCompletedTasks()); } - if (hasWithinDagPriority() != other.hasWithinDagPriority()) return false; + result = result && (hasWithinDagPriority() == other.hasWithinDagPriority()); if (hasWithinDagPriority()) { - if (getWithinDagPriority() - != other.getWithinDagPriority()) return false; + result = result && (getWithinDagPriority() + == other.getWithinDagPriority()); } - if (hasDagStartTime() != other.hasDagStartTime()) return false; + result = result && (hasDagStartTime() == other.hasDagStartTime()); if (hasDagStartTime()) { - if (getDagStartTime() - != other.getDagStartTime()) return false; + result = result && (getDagStartTime() + == other.getDagStartTime()); } - if (hasFirstAttemptStartTime() != 
other.hasFirstAttemptStartTime()) return false; + result = result && (hasFirstAttemptStartTime() == other.hasFirstAttemptStartTime()); if (hasFirstAttemptStartTime()) { - if (getFirstAttemptStartTime() - != other.getFirstAttemptStartTime()) return false; + result = result && (getFirstAttemptStartTime() + == other.getFirstAttemptStartTime()); } - if (hasCurrentAttemptStartTime() != other.hasCurrentAttemptStartTime()) return false; + result = result && (hasCurrentAttemptStartTime() == other.hasCurrentAttemptStartTime()); if (hasCurrentAttemptStartTime()) { - if (getCurrentAttemptStartTime() - != other.getCurrentAttemptStartTime()) return false; + result = result && (getCurrentAttemptStartTime() + == other.getCurrentAttemptStartTime()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasNumSelfAndUpstreamTasks()) { hash = (37 * hash) + NUM_SELF_AND_UPSTREAM_TASKS_FIELD_NUMBER; hash = (53 * hash) + getNumSelfAndUpstreamTasks(); @@ -8318,35 +7492,21 @@ public int hashCode() { } if (hasDagStartTime()) { hash = (37 * hash) + DAG_START_TIME_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getDagStartTime()); + hash = (53 * hash) + hashLong(getDagStartTime()); } if (hasFirstAttemptStartTime()) { hash = (37 * hash) + FIRST_ATTEMPT_START_TIME_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getFirstAttemptStartTime()); + hash = (53 * hash) + hashLong(getFirstAttemptStartTime()); } if (hasCurrentAttemptStartTime()) { hash = (37 * hash) + CURRENT_ATTEMPT_START_TIME_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getCurrentAttemptStartTime()); + hash = (53 * hash) + hashLong(getCurrentAttemptStartTime()); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -8370,59 +7530,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Fr } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo parseFrom( java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -8430,16 +7577,14 @@ protected Builder newBuilderForType( * Protobuf type {@code FragmentRuntimeInfo} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:FragmentRuntimeInfo) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_FragmentRuntimeInfo_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_FragmentRuntimeInfo_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -8452,16 +7597,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); numSelfAndUpstreamTasks_ = 0; @@ -8479,18 +7626,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_FragmentRuntimeInfo_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo result = buildPartial(); if (!result.isInitialized()) { @@ -8499,73 +7647,39 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentR return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { - result.numSelfAndUpstreamTasks_ = 
numSelfAndUpstreamTasks_; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (((from_bitField0_ & 0x00000002) != 0)) { - result.numSelfAndUpstreamCompletedTasks_ = numSelfAndUpstreamCompletedTasks_; + result.numSelfAndUpstreamTasks_ = numSelfAndUpstreamTasks_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - if (((from_bitField0_ & 0x00000004) != 0)) { - result.withinDagPriority_ = withinDagPriority_; + result.numSelfAndUpstreamCompletedTasks_ = numSelfAndUpstreamCompletedTasks_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } - if (((from_bitField0_ & 0x00000008) != 0)) { - result.dagStartTime_ = dagStartTime_; + result.withinDagPriority_ = withinDagPriority_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } - if (((from_bitField0_ & 0x00000010) != 0)) { - result.firstAttemptStartTime_ = firstAttemptStartTime_; + result.dagStartTime_ = dagStartTime_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } - if (((from_bitField0_ & 0x00000020) != 0)) { - result.currentAttemptStartTime_ = currentAttemptStartTime_; + result.firstAttemptStartTime_ = firstAttemptStartTime_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } + result.currentAttemptStartTime_ = currentAttemptStartTime_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo)other); @@ -8595,17 +7709,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasCurrentAttemptStartTime()) { setCurrentAttemptStartTime(other.getCurrentAttemptStartTime()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -8615,7 +7726,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = 
(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -8625,27 +7736,22 @@ public Builder mergeFrom( } private int bitField0_; + // optional int32 num_self_and_upstream_tasks = 1; private int numSelfAndUpstreamTasks_ ; /** * optional int32 num_self_and_upstream_tasks = 1; - * @return Whether the numSelfAndUpstreamTasks field is set. */ - @java.lang.Override public boolean hasNumSelfAndUpstreamTasks() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional int32 num_self_and_upstream_tasks = 1; - * @return The numSelfAndUpstreamTasks. */ - @java.lang.Override public int getNumSelfAndUpstreamTasks() { return numSelfAndUpstreamTasks_; } /** * optional int32 num_self_and_upstream_tasks = 1; - * @param value The numSelfAndUpstreamTasks to set. - * @return This builder for chaining. */ public Builder setNumSelfAndUpstreamTasks(int value) { bitField0_ |= 0x00000001; @@ -8655,7 +7761,6 @@ public Builder setNumSelfAndUpstreamTasks(int value) { } /** * optional int32 num_self_and_upstream_tasks = 1; - * @return This builder for chaining. */ public Builder clearNumSelfAndUpstreamTasks() { bitField0_ = (bitField0_ & ~0x00000001); @@ -8664,27 +7769,22 @@ public Builder clearNumSelfAndUpstreamTasks() { return this; } + // optional int32 num_self_and_upstream_completed_tasks = 2; private int numSelfAndUpstreamCompletedTasks_ ; /** * optional int32 num_self_and_upstream_completed_tasks = 2; - * @return Whether the numSelfAndUpstreamCompletedTasks field is set. */ - @java.lang.Override public boolean hasNumSelfAndUpstreamCompletedTasks() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional int32 num_self_and_upstream_completed_tasks = 2; - * @return The numSelfAndUpstreamCompletedTasks. */ - @java.lang.Override public int getNumSelfAndUpstreamCompletedTasks() { return numSelfAndUpstreamCompletedTasks_; } /** * optional int32 num_self_and_upstream_completed_tasks = 2; - * @param value The numSelfAndUpstreamCompletedTasks to set. - * @return This builder for chaining. */ public Builder setNumSelfAndUpstreamCompletedTasks(int value) { bitField0_ |= 0x00000002; @@ -8694,7 +7794,6 @@ public Builder setNumSelfAndUpstreamCompletedTasks(int value) { } /** * optional int32 num_self_and_upstream_completed_tasks = 2; - * @return This builder for chaining. */ public Builder clearNumSelfAndUpstreamCompletedTasks() { bitField0_ = (bitField0_ & ~0x00000002); @@ -8703,27 +7802,22 @@ public Builder clearNumSelfAndUpstreamCompletedTasks() { return this; } + // optional int32 within_dag_priority = 3; private int withinDagPriority_ ; /** * optional int32 within_dag_priority = 3; - * @return Whether the withinDagPriority field is set. */ - @java.lang.Override public boolean hasWithinDagPriority() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional int32 within_dag_priority = 3; - * @return The withinDagPriority. */ - @java.lang.Override public int getWithinDagPriority() { return withinDagPriority_; } /** * optional int32 within_dag_priority = 3; - * @param value The withinDagPriority to set. - * @return This builder for chaining. 
*/ public Builder setWithinDagPriority(int value) { bitField0_ |= 0x00000004; @@ -8733,7 +7827,6 @@ public Builder setWithinDagPriority(int value) { } /** * optional int32 within_dag_priority = 3; - * @return This builder for chaining. */ public Builder clearWithinDagPriority() { bitField0_ = (bitField0_ & ~0x00000004); @@ -8742,27 +7835,22 @@ public Builder clearWithinDagPriority() { return this; } + // optional int64 dag_start_time = 4; private long dagStartTime_ ; /** * optional int64 dag_start_time = 4; - * @return Whether the dagStartTime field is set. */ - @java.lang.Override public boolean hasDagStartTime() { - return ((bitField0_ & 0x00000008) != 0); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional int64 dag_start_time = 4; - * @return The dagStartTime. */ - @java.lang.Override public long getDagStartTime() { return dagStartTime_; } /** * optional int64 dag_start_time = 4; - * @param value The dagStartTime to set. - * @return This builder for chaining. */ public Builder setDagStartTime(long value) { bitField0_ |= 0x00000008; @@ -8772,7 +7860,6 @@ public Builder setDagStartTime(long value) { } /** * optional int64 dag_start_time = 4; - * @return This builder for chaining. */ public Builder clearDagStartTime() { bitField0_ = (bitField0_ & ~0x00000008); @@ -8781,27 +7868,22 @@ public Builder clearDagStartTime() { return this; } + // optional int64 first_attempt_start_time = 5; private long firstAttemptStartTime_ ; /** * optional int64 first_attempt_start_time = 5; - * @return Whether the firstAttemptStartTime field is set. */ - @java.lang.Override public boolean hasFirstAttemptStartTime() { - return ((bitField0_ & 0x00000010) != 0); + return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional int64 first_attempt_start_time = 5; - * @return The firstAttemptStartTime. */ - @java.lang.Override public long getFirstAttemptStartTime() { return firstAttemptStartTime_; } /** * optional int64 first_attempt_start_time = 5; - * @param value The firstAttemptStartTime to set. - * @return This builder for chaining. */ public Builder setFirstAttemptStartTime(long value) { bitField0_ |= 0x00000010; @@ -8811,7 +7893,6 @@ public Builder setFirstAttemptStartTime(long value) { } /** * optional int64 first_attempt_start_time = 5; - * @return This builder for chaining. */ public Builder clearFirstAttemptStartTime() { bitField0_ = (bitField0_ & ~0x00000010); @@ -8820,27 +7901,22 @@ public Builder clearFirstAttemptStartTime() { return this; } + // optional int64 current_attempt_start_time = 6; private long currentAttemptStartTime_ ; /** * optional int64 current_attempt_start_time = 6; - * @return Whether the currentAttemptStartTime field is set. */ - @java.lang.Override public boolean hasCurrentAttemptStartTime() { - return ((bitField0_ & 0x00000020) != 0); + return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional int64 current_attempt_start_time = 6; - * @return The currentAttemptStartTime. */ - @java.lang.Override public long getCurrentAttemptStartTime() { return currentAttemptStartTime_; } /** * optional int64 current_attempt_start_time = 6; - * @param value The currentAttemptStartTime to set. - * @return This builder for chaining. */ public Builder setCurrentAttemptStartTime(long value) { bitField0_ |= 0x00000020; @@ -8850,7 +7926,6 @@ public Builder setCurrentAttemptStartTime(long value) { } /** * optional int64 current_attempt_start_time = 6; - * @return This builder for chaining. 
*/ public Builder clearCurrentAttemptStartTime() { bitField0_ = (bitField0_ & ~0x00000020); @@ -8858,99 +7933,53 @@ public Builder clearCurrentAttemptStartTime() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:FragmentRuntimeInfo) } - // @@protoc_insertion_point(class_scope:FragmentRuntimeInfo) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public FragmentRuntimeInfo parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new FragmentRuntimeInfo(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new FragmentRuntimeInfo(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:FragmentRuntimeInfo) } - public interface QueryIdentifierProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:QueryIdentifierProto) - com.google.protobuf.MessageOrBuilder { + public interface QueryIdentifierProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional string application_id_string = 1; /** * optional string application_id_string = 1; - * @return Whether the applicationIdString field is set. */ boolean hasApplicationIdString(); /** * optional string application_id_string = 1; - * @return The applicationIdString. */ java.lang.String getApplicationIdString(); /** * optional string application_id_string = 1; - * @return The bytes for applicationIdString. */ com.google.protobuf.ByteString getApplicationIdStringBytes(); + // optional int32 dag_index = 2; /** * optional int32 dag_index = 2; - * @return Whether the dagIndex field is set. */ boolean hasDagIndex(); /** * optional int32 dag_index = 2; - * @return The dagIndex. */ int getDagIndex(); + // optional int32 app_attempt_number = 3; /** * optional int32 app_attempt_number = 3; - * @return Whether the appAttemptNumber field is set. */ boolean hasAppAttemptNumber(); /** * optional int32 app_attempt_number = 3; - * @return The appAttemptNumber. 
*/ int getAppAttemptNumber(); } @@ -8958,38 +7987,35 @@ public interface QueryIdentifierProtoOrBuilder extends * Protobuf type {@code QueryIdentifierProto} */ public static final class QueryIdentifierProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:QueryIdentifierProto) - QueryIdentifierProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements QueryIdentifierProtoOrBuilder { // Use QueryIdentifierProto.newBuilder() to construct. - private QueryIdentifierProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private QueryIdentifierProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private QueryIdentifierProto() { - applicationIdString_ = ""; + private QueryIdentifierProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final QueryIdentifierProto defaultInstance; + public static QueryIdentifierProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new QueryIdentifierProto(); + public QueryIdentifierProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private QueryIdentifierProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -9001,10 +8027,16 @@ private QueryIdentifierProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - applicationIdString_ = bs; + applicationIdString_ = input.readBytes(); break; } case 16: { @@ -9017,22 +8049,13 @@ private QueryIdentifierProto( appAttemptNumber_ = input.readInt32(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -9043,30 +8066,41 @@ private QueryIdentifierProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryIdentifierProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryIdentifierProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public QueryIdentifierProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new QueryIdentifierProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional string application_id_string = 1; public static final int APPLICATION_ID_STRING_FIELD_NUMBER = 1; - private volatile java.lang.Object applicationIdString_; + private java.lang.Object applicationIdString_; /** * optional string application_id_string = 1; - * @return Whether the applicationIdString field is set. */ - @java.lang.Override public boolean hasApplicationIdString() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string application_id_string = 1; - * @return The applicationIdString. */ - @java.lang.Override public java.lang.String getApplicationIdString() { java.lang.Object ref = applicationIdString_; if (ref instanceof java.lang.String) { @@ -9083,9 +8117,7 @@ public java.lang.String getApplicationIdString() { } /** * optional string application_id_string = 1; - * @return The bytes for applicationIdString. */ - @java.lang.Override public com.google.protobuf.ByteString getApplicationIdStringBytes() { java.lang.Object ref = applicationIdString_; @@ -9100,92 +8132,97 @@ public java.lang.String getApplicationIdString() { } } + // optional int32 dag_index = 2; public static final int DAG_INDEX_FIELD_NUMBER = 2; private int dagIndex_; /** * optional int32 dag_index = 2; - * @return Whether the dagIndex field is set. */ - @java.lang.Override public boolean hasDagIndex() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional int32 dag_index = 2; - * @return The dagIndex. */ - @java.lang.Override public int getDagIndex() { return dagIndex_; } + // optional int32 app_attempt_number = 3; public static final int APP_ATTEMPT_NUMBER_FIELD_NUMBER = 3; private int appAttemptNumber_; /** * optional int32 app_attempt_number = 3; - * @return Whether the appAttemptNumber field is set. */ - @java.lang.Override public boolean hasAppAttemptNumber() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional int32 app_attempt_number = 3; - * @return The appAttemptNumber. 
*/ - @java.lang.Override public int getAppAttemptNumber() { return appAttemptNumber_; } + private void initFields() { + applicationIdString_ = ""; + dagIndex_ = 0; + appAttemptNumber_ = 0; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, applicationIdString_); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getApplicationIdStringBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeInt32(2, dagIndex_); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeInt32(3, appAttemptNumber_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, applicationIdString_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getApplicationIdStringBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(2, dagIndex_); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(3, appAttemptNumber_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -9196,32 +8233,35 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto) obj; - if (hasApplicationIdString() != other.hasApplicationIdString()) return false; + boolean result = true; + result = result && (hasApplicationIdString() == other.hasApplicationIdString()); if (hasApplicationIdString()) { - if (!getApplicationIdString() - .equals(other.getApplicationIdString())) return false; + result = result && getApplicationIdString() + .equals(other.getApplicationIdString()); } - if (hasDagIndex() != other.hasDagIndex()) return false; + result = result && (hasDagIndex() == other.hasDagIndex()); if (hasDagIndex()) { - if (getDagIndex() - != other.getDagIndex()) return false; + result = result && (getDagIndex() + == other.getDagIndex()); } - if (hasAppAttemptNumber() != other.hasAppAttemptNumber()) return false; + result = result && 
(hasAppAttemptNumber() == other.hasAppAttemptNumber()); if (hasAppAttemptNumber()) { - if (getAppAttemptNumber() - != other.getAppAttemptNumber()) return false; + result = result && (getAppAttemptNumber() + == other.getAppAttemptNumber()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasApplicationIdString()) { hash = (37 * hash) + APPLICATION_ID_STRING_FIELD_NUMBER; hash = (53 * hash) + getApplicationIdString().hashCode(); @@ -9234,22 +8274,11 @@ public int hashCode() { hash = (37 * hash) + APP_ATTEMPT_NUMBER_FIELD_NUMBER; hash = (53 * hash) + getAppAttemptNumber(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -9273,59 +8302,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Qu } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom( com.google.protobuf.CodedInputStream input) 
throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -9333,16 +8349,14 @@ protected Builder newBuilderForType( * Protobuf type {@code QueryIdentifierProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:QueryIdentifierProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryIdentifierProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryIdentifierProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -9355,16 +8369,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); applicationIdString_ = ""; @@ -9376,18 +8392,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryIdentifierProto_descriptor; } - @java.lang.Override public 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto result = buildPartial(); if (!result.isInitialized()) { @@ -9396,61 +8413,27 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIden return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.applicationIdString_ = applicationIdString_; - if (((from_bitField0_ & 0x00000002) != 0)) { - result.dagIndex_ = dagIndex_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - if (((from_bitField0_ & 0x00000004) != 0)) { - result.appAttemptNumber_ = appAttemptNumber_; + result.dagIndex_ = dagIndex_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } + result.appAttemptNumber_ = appAttemptNumber_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto)other); @@ -9473,17 +8456,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasAppAttemptNumber()) { setAppAttemptNumber(other.getAppAttemptNumber()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -9493,7 +8473,7 @@ public Builder mergeFrom( parsedMessage = 
PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -9503,27 +8483,23 @@ public Builder mergeFrom( } private int bitField0_; + // optional string application_id_string = 1; private java.lang.Object applicationIdString_ = ""; /** * optional string application_id_string = 1; - * @return Whether the applicationIdString field is set. */ public boolean hasApplicationIdString() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string application_id_string = 1; - * @return The applicationIdString. */ public java.lang.String getApplicationIdString() { java.lang.Object ref = applicationIdString_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - applicationIdString_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + applicationIdString_ = s; return s; } else { return (java.lang.String) ref; @@ -9531,7 +8507,6 @@ public java.lang.String getApplicationIdString() { } /** * optional string application_id_string = 1; - * @return The bytes for applicationIdString. */ public com.google.protobuf.ByteString getApplicationIdStringBytes() { @@ -9548,8 +8523,6 @@ public java.lang.String getApplicationIdString() { } /** * optional string application_id_string = 1; - * @param value The applicationIdString to set. - * @return This builder for chaining. */ public Builder setApplicationIdString( java.lang.String value) { @@ -9563,7 +8536,6 @@ public Builder setApplicationIdString( } /** * optional string application_id_string = 1; - * @return This builder for chaining. */ public Builder clearApplicationIdString() { bitField0_ = (bitField0_ & ~0x00000001); @@ -9573,8 +8545,6 @@ public Builder clearApplicationIdString() { } /** * optional string application_id_string = 1; - * @param value The bytes for applicationIdString to set. - * @return This builder for chaining. */ public Builder setApplicationIdStringBytes( com.google.protobuf.ByteString value) { @@ -9587,27 +8557,22 @@ public Builder setApplicationIdStringBytes( return this; } + // optional int32 dag_index = 2; private int dagIndex_ ; /** * optional int32 dag_index = 2; - * @return Whether the dagIndex field is set. */ - @java.lang.Override public boolean hasDagIndex() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional int32 dag_index = 2; - * @return The dagIndex. */ - @java.lang.Override public int getDagIndex() { return dagIndex_; } /** * optional int32 dag_index = 2; - * @param value The dagIndex to set. - * @return This builder for chaining. */ public Builder setDagIndex(int value) { bitField0_ |= 0x00000002; @@ -9617,7 +8582,6 @@ public Builder setDagIndex(int value) { } /** * optional int32 dag_index = 2; - * @return This builder for chaining. 
*/ public Builder clearDagIndex() { bitField0_ = (bitField0_ & ~0x00000002); @@ -9626,27 +8590,22 @@ public Builder clearDagIndex() { return this; } + // optional int32 app_attempt_number = 3; private int appAttemptNumber_ ; /** * optional int32 app_attempt_number = 3; - * @return Whether the appAttemptNumber field is set. */ - @java.lang.Override public boolean hasAppAttemptNumber() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional int32 app_attempt_number = 3; - * @return The appAttemptNumber. */ - @java.lang.Override public int getAppAttemptNumber() { return appAttemptNumber_; } /** * optional int32 app_attempt_number = 3; - * @param value The appAttemptNumber to set. - * @return This builder for chaining. */ public Builder setAppAttemptNumber(int value) { bitField0_ |= 0x00000004; @@ -9656,7 +8615,6 @@ public Builder setAppAttemptNumber(int value) { } /** * optional int32 app_attempt_number = 3; - * @return This builder for chaining. */ public Builder clearAppAttemptNumber() { bitField0_ = (bitField0_ & ~0x00000004); @@ -9664,120 +8622,74 @@ public Builder clearAppAttemptNumber() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:QueryIdentifierProto) } - // @@protoc_insertion_point(class_scope:QueryIdentifierProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public QueryIdentifierProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new QueryIdentifierProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new QueryIdentifierProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:QueryIdentifierProto) } - public interface NotTezEventOrBuilder extends - // @@protoc_insertion_point(interface_extends:NotTezEvent) - com.google.protobuf.MessageOrBuilder { + public interface NotTezEventOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // required bytes input_event_proto_bytes = 1; /** * required bytes input_event_proto_bytes = 1; - * @return Whether the inputEventProtoBytes field is set. 
*/ boolean hasInputEventProtoBytes(); /** * required bytes input_event_proto_bytes = 1; - * @return The inputEventProtoBytes. */ com.google.protobuf.ByteString getInputEventProtoBytes(); + // required string vertex_name = 2; /** * required string vertex_name = 2; - * @return Whether the vertexName field is set. */ boolean hasVertexName(); /** * required string vertex_name = 2; - * @return The vertexName. */ java.lang.String getVertexName(); /** * required string vertex_name = 2; - * @return The bytes for vertexName. */ com.google.protobuf.ByteString getVertexNameBytes(); + // required string dest_input_name = 3; /** * required string dest_input_name = 3; - * @return Whether the destInputName field is set. */ boolean hasDestInputName(); /** * required string dest_input_name = 3; - * @return The destInputName. */ java.lang.String getDestInputName(); /** * required string dest_input_name = 3; - * @return The bytes for destInputName. */ com.google.protobuf.ByteString getDestInputNameBytes(); + // optional int32 key_id = 4; /** * optional int32 key_id = 4; - * @return Whether the keyId field is set. */ boolean hasKeyId(); /** * optional int32 key_id = 4; - * @return The keyId. */ int getKeyId(); } /** + * Protobuf type {@code NotTezEvent} + * *
    **
    * Tez API implementation derives an enum value from instanceof on the event, then uses that enum
@@ -9786,44 +8698,37 @@ public interface NotTezEventOrBuilder extends
    * three times over to add anything there. So, we'd do our own "inspired" serialization.
    * Eventually we'll move away from events for API.
    * 
- * - * Protobuf type {@code NotTezEvent} */ public static final class NotTezEvent extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:NotTezEvent) - NotTezEventOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements NotTezEventOrBuilder { // Use NotTezEvent.newBuilder() to construct. - private NotTezEvent(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private NotTezEvent(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private NotTezEvent() { - inputEventProtoBytes_ = com.google.protobuf.ByteString.EMPTY; - vertexName_ = ""; - destInputName_ = ""; + private NotTezEvent(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final NotTezEvent defaultInstance; + public static NotTezEvent getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new NotTezEvent(); + public NotTezEvent getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private NotTezEvent( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -9835,21 +8740,26 @@ private NotTezEvent( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { bitField0_ |= 0x00000001; inputEventProtoBytes_ = input.readBytes(); break; } case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; - vertexName_ = bs; + vertexName_ = input.readBytes(); break; } case 26: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000004; - destInputName_ = bs; + destInputName_ = input.readBytes(); break; } case 32: { @@ -9857,22 +8767,13 @@ private NotTezEvent( keyId_ = input.readInt32(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -9883,49 +8784,57 @@ private NotTezEvent( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_NotTezEvent_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable 
internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_NotTezEvent_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public NotTezEvent parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new NotTezEvent(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // required bytes input_event_proto_bytes = 1; public static final int INPUT_EVENT_PROTO_BYTES_FIELD_NUMBER = 1; private com.google.protobuf.ByteString inputEventProtoBytes_; /** * required bytes input_event_proto_bytes = 1; - * @return Whether the inputEventProtoBytes field is set. */ - @java.lang.Override public boolean hasInputEventProtoBytes() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bytes input_event_proto_bytes = 1; - * @return The inputEventProtoBytes. */ - @java.lang.Override public com.google.protobuf.ByteString getInputEventProtoBytes() { return inputEventProtoBytes_; } + // required string vertex_name = 2; public static final int VERTEX_NAME_FIELD_NUMBER = 2; - private volatile java.lang.Object vertexName_; + private java.lang.Object vertexName_; /** * required string vertex_name = 2; - * @return Whether the vertexName field is set. */ - @java.lang.Override public boolean hasVertexName() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string vertex_name = 2; - * @return The vertexName. */ - @java.lang.Override public java.lang.String getVertexName() { java.lang.Object ref = vertexName_; if (ref instanceof java.lang.String) { @@ -9942,9 +8851,7 @@ public java.lang.String getVertexName() { } /** * required string vertex_name = 2; - * @return The bytes for vertexName. */ - @java.lang.Override public com.google.protobuf.ByteString getVertexNameBytes() { java.lang.Object ref = vertexName_; @@ -9959,21 +8866,18 @@ public java.lang.String getVertexName() { } } + // required string dest_input_name = 3; public static final int DEST_INPUT_NAME_FIELD_NUMBER = 3; - private volatile java.lang.Object destInputName_; + private java.lang.Object destInputName_; /** * required string dest_input_name = 3; - * @return Whether the destInputName field is set. */ - @java.lang.Override public boolean hasDestInputName() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string dest_input_name = 3; - * @return The destInputName. */ - @java.lang.Override public java.lang.String getDestInputName() { java.lang.Object ref = destInputName_; if (ref instanceof java.lang.String) { @@ -9990,9 +8894,7 @@ public java.lang.String getDestInputName() { } /** * required string dest_input_name = 3; - * @return The bytes for destInputName. 
*/ - @java.lang.Override public com.google.protobuf.ByteString getDestInputNameBytes() { java.lang.Object ref = destInputName_; @@ -10007,31 +8909,32 @@ public java.lang.String getDestInputName() { } } + // optional int32 key_id = 4; public static final int KEY_ID_FIELD_NUMBER = 4; private int keyId_; /** * optional int32 key_id = 4; - * @return Whether the keyId field is set. */ - @java.lang.Override public boolean hasKeyId() { - return ((bitField0_ & 0x00000008) != 0); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional int32 key_id = 4; - * @return The keyId. */ - @java.lang.Override public int getKeyId() { return keyId_; } + private void initFields() { + inputEventProtoBytes_ = com.google.protobuf.ByteString.EMPTY; + vertexName_ = ""; + destInputName_ = ""; + keyId_ = 0; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; if (!hasInputEventProtoBytes()) { memoizedIsInitialized = 0; @@ -10049,49 +8952,58 @@ public final boolean isInitialized() { return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, inputEventProtoBytes_); } - if (((bitField0_ & 0x00000002) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, vertexName_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getVertexNameBytes()); } - if (((bitField0_ & 0x00000004) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 3, destInputName_); + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeBytes(3, getDestInputNameBytes()); } - if (((bitField0_ & 0x00000008) != 0)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeInt32(4, keyId_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, inputEventProtoBytes_); } - if (((bitField0_ & 0x00000002) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, vertexName_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getVertexNameBytes()); } - if (((bitField0_ & 0x00000004) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(3, destInputName_); + if (((bitField0_ & 0x00000004) == 0x00000004)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(3, getDestInputNameBytes()); } - if (((bitField0_ & 0x00000008) != 0)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(4, keyId_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws 
java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -10102,37 +9014,40 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent) obj; - if (hasInputEventProtoBytes() != other.hasInputEventProtoBytes()) return false; + boolean result = true; + result = result && (hasInputEventProtoBytes() == other.hasInputEventProtoBytes()); if (hasInputEventProtoBytes()) { - if (!getInputEventProtoBytes() - .equals(other.getInputEventProtoBytes())) return false; + result = result && getInputEventProtoBytes() + .equals(other.getInputEventProtoBytes()); } - if (hasVertexName() != other.hasVertexName()) return false; + result = result && (hasVertexName() == other.hasVertexName()); if (hasVertexName()) { - if (!getVertexName() - .equals(other.getVertexName())) return false; + result = result && getVertexName() + .equals(other.getVertexName()); } - if (hasDestInputName() != other.hasDestInputName()) return false; + result = result && (hasDestInputName() == other.hasDestInputName()); if (hasDestInputName()) { - if (!getDestInputName() - .equals(other.getDestInputName())) return false; + result = result && getDestInputName() + .equals(other.getDestInputName()); } - if (hasKeyId() != other.hasKeyId()) return false; + result = result && (hasKeyId() == other.hasKeyId()); if (hasKeyId()) { - if (getKeyId() - != other.getKeyId()) return false; + result = result && (getKeyId() + == other.getKeyId()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasInputEventProtoBytes()) { hash = (37 * hash) + INPUT_EVENT_PROTO_BYTES_FIELD_NUMBER; hash = (53 * hash) + getInputEventProtoBytes().hashCode(); @@ -10149,22 +9064,11 @@ public int hashCode() { hash = (37 * hash) + KEY_ID_FIELD_NUMBER; hash = (53 * hash) + getKeyId(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -10188,63 +9092,52 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.No } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent parseFrom(java.io.InputStream input) throws java.io.IOException { - return 
com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** + * Protobuf type {@code NotTezEvent} + * *
      **
      * Tez API implementation derives an enum value from instanceof on the event, then uses that enum
@@ -10253,20 +9146,16 @@ protected Builder newBuilderForType(
      * three times over to add anything there. So, we'd do our own "inspired" serialization.
      * Eventually we'll move away from events for API.
      * 
- * - * Protobuf type {@code NotTezEvent} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:NotTezEvent) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEventOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEventOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_NotTezEvent_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_NotTezEvent_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -10279,16 +9168,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); inputEventProtoBytes_ = com.google.protobuf.ByteString.EMPTY; @@ -10302,18 +9193,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_NotTezEvent_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent result = buildPartial(); if (!result.isInitialized()) { @@ -10322,65 +9214,31 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEve return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.inputEventProtoBytes_ = inputEventProtoBytes_; - if (((from_bitField0_ & 0x00000002) != 0)) { + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.vertexName_ = vertexName_; - if (((from_bitField0_ & 0x00000004) != 0)) { + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.destInputName_ = destInputName_; - if (((from_bitField0_ & 0x00000008) != 0)) { - result.keyId_ = keyId_; + if 
(((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } + result.keyId_ = keyId_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent)other); @@ -10408,26 +9266,26 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasKeyId()) { setKeyId(other.getKeyId()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { if (!hasInputEventProtoBytes()) { + return false; } if (!hasVertexName()) { + return false; } if (!hasDestInputName()) { + return false; } return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -10437,7 +9295,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -10447,27 +9305,22 @@ public Builder mergeFrom( } private int bitField0_; + // required bytes input_event_proto_bytes = 1; private com.google.protobuf.ByteString inputEventProtoBytes_ = com.google.protobuf.ByteString.EMPTY; /** * required bytes input_event_proto_bytes = 1; - * @return Whether the inputEventProtoBytes field is set. */ - @java.lang.Override public boolean hasInputEventProtoBytes() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required bytes input_event_proto_bytes = 1; - * @return The inputEventProtoBytes. */ - @java.lang.Override public com.google.protobuf.ByteString getInputEventProtoBytes() { return inputEventProtoBytes_; } /** * required bytes input_event_proto_bytes = 1; - * @param value The inputEventProtoBytes to set. - * @return This builder for chaining. 
*/ public Builder setInputEventProtoBytes(com.google.protobuf.ByteString value) { if (value == null) { @@ -10480,7 +9333,6 @@ public Builder setInputEventProtoBytes(com.google.protobuf.ByteString value) { } /** * required bytes input_event_proto_bytes = 1; - * @return This builder for chaining. */ public Builder clearInputEventProtoBytes() { bitField0_ = (bitField0_ & ~0x00000001); @@ -10489,27 +9341,23 @@ public Builder clearInputEventProtoBytes() { return this; } + // required string vertex_name = 2; private java.lang.Object vertexName_ = ""; /** * required string vertex_name = 2; - * @return Whether the vertexName field is set. */ public boolean hasVertexName() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required string vertex_name = 2; - * @return The vertexName. */ public java.lang.String getVertexName() { java.lang.Object ref = vertexName_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - vertexName_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + vertexName_ = s; return s; } else { return (java.lang.String) ref; @@ -10517,7 +9365,6 @@ public java.lang.String getVertexName() { } /** * required string vertex_name = 2; - * @return The bytes for vertexName. */ public com.google.protobuf.ByteString getVertexNameBytes() { @@ -10534,8 +9381,6 @@ public java.lang.String getVertexName() { } /** * required string vertex_name = 2; - * @param value The vertexName to set. - * @return This builder for chaining. */ public Builder setVertexName( java.lang.String value) { @@ -10549,7 +9394,6 @@ public Builder setVertexName( } /** * required string vertex_name = 2; - * @return This builder for chaining. */ public Builder clearVertexName() { bitField0_ = (bitField0_ & ~0x00000002); @@ -10559,8 +9403,6 @@ public Builder clearVertexName() { } /** * required string vertex_name = 2; - * @param value The bytes for vertexName to set. - * @return This builder for chaining. */ public Builder setVertexNameBytes( com.google.protobuf.ByteString value) { @@ -10573,27 +9415,23 @@ public Builder setVertexNameBytes( return this; } + // required string dest_input_name = 3; private java.lang.Object destInputName_ = ""; /** * required string dest_input_name = 3; - * @return Whether the destInputName field is set. */ public boolean hasDestInputName() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * required string dest_input_name = 3; - * @return The destInputName. */ public java.lang.String getDestInputName() { java.lang.Object ref = destInputName_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - destInputName_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + destInputName_ = s; return s; } else { return (java.lang.String) ref; @@ -10601,7 +9439,6 @@ public java.lang.String getDestInputName() { } /** * required string dest_input_name = 3; - * @return The bytes for destInputName. */ public com.google.protobuf.ByteString getDestInputNameBytes() { @@ -10618,8 +9455,6 @@ public java.lang.String getDestInputName() { } /** * required string dest_input_name = 3; - * @param value The destInputName to set. - * @return This builder for chaining. 
*/ public Builder setDestInputName( java.lang.String value) { @@ -10633,7 +9468,6 @@ public Builder setDestInputName( } /** * required string dest_input_name = 3; - * @return This builder for chaining. */ public Builder clearDestInputName() { bitField0_ = (bitField0_ & ~0x00000004); @@ -10643,8 +9477,6 @@ public Builder clearDestInputName() { } /** * required string dest_input_name = 3; - * @param value The bytes for destInputName to set. - * @return This builder for chaining. */ public Builder setDestInputNameBytes( com.google.protobuf.ByteString value) { @@ -10657,27 +9489,22 @@ public Builder setDestInputNameBytes( return this; } + // optional int32 key_id = 4; private int keyId_ ; /** * optional int32 key_id = 4; - * @return Whether the keyId field is set. */ - @java.lang.Override public boolean hasKeyId() { - return ((bitField0_ & 0x00000008) != 0); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional int32 key_id = 4; - * @return The keyId. */ - @java.lang.Override public int getKeyId() { return keyId_; } /** * optional int32 key_id = 4; - * @param value The keyId to set. - * @return This builder for chaining. */ public Builder setKeyId(int value) { bitField0_ |= 0x00000008; @@ -10687,7 +9514,6 @@ public Builder setKeyId(int value) { } /** * optional int32 key_id = 4; - * @return This builder for chaining. */ public Builder clearKeyId() { bitField0_ = (bitField0_ & ~0x00000008); @@ -10695,71 +9521,28 @@ public Builder clearKeyId() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:NotTezEvent) } - // @@protoc_insertion_point(class_scope:NotTezEvent) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public NotTezEvent parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new NotTezEvent(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.NotTezEvent getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new NotTezEvent(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:NotTezEvent) } - public interface SubmitWorkRequestProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:SubmitWorkRequestProto) - com.google.protobuf.MessageOrBuilder { + public interface SubmitWorkRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional 
.VertexOrBinary work_spec = 1; /** * optional .VertexOrBinary work_spec = 1; - * @return Whether the workSpec field is set. */ boolean hasWorkSpec(); /** * optional .VertexOrBinary work_spec = 1; - * @return The workSpec. */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary getWorkSpec(); /** @@ -10767,196 +9550,180 @@ public interface SubmitWorkRequestProtoOrBuilder extends */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinaryOrBuilder getWorkSpecOrBuilder(); + // optional bytes work_spec_signature = 2; /** * optional bytes work_spec_signature = 2; - * @return Whether the workSpecSignature field is set. */ boolean hasWorkSpecSignature(); /** * optional bytes work_spec_signature = 2; - * @return The workSpecSignature. */ com.google.protobuf.ByteString getWorkSpecSignature(); + // optional int32 fragment_number = 3; /** * optional int32 fragment_number = 3; - * @return Whether the fragmentNumber field is set. */ boolean hasFragmentNumber(); /** * optional int32 fragment_number = 3; - * @return The fragmentNumber. */ int getFragmentNumber(); + // optional int32 attempt_number = 4; /** * optional int32 attempt_number = 4; - * @return Whether the attemptNumber field is set. */ boolean hasAttemptNumber(); /** * optional int32 attempt_number = 4; - * @return The attemptNumber. */ int getAttemptNumber(); + // optional string container_id_string = 5; /** * optional string container_id_string = 5; - * @return Whether the containerIdString field is set. */ boolean hasContainerIdString(); /** * optional string container_id_string = 5; - * @return The containerIdString. */ java.lang.String getContainerIdString(); /** * optional string container_id_string = 5; - * @return The bytes for containerIdString. */ com.google.protobuf.ByteString getContainerIdStringBytes(); + // optional string am_host = 6; /** * optional string am_host = 6; - * @return Whether the amHost field is set. */ boolean hasAmHost(); /** * optional string am_host = 6; - * @return The amHost. */ java.lang.String getAmHost(); /** * optional string am_host = 6; - * @return The bytes for amHost. */ com.google.protobuf.ByteString getAmHostBytes(); + // optional int32 am_port = 7; /** * optional int32 am_port = 7; - * @return Whether the amPort field is set. */ boolean hasAmPort(); /** * optional int32 am_port = 7; - * @return The amPort. */ int getAmPort(); + // optional bytes credentials_binary = 8; /** + * optional bytes credentials_binary = 8; + * *
      * Credentials are not signed - the client can add e.g. his own HDFS tokens.
      * </pre>
-     *
-     * <code>optional bytes credentials_binary = 8;</code>
-     * @return Whether the credentialsBinary field is set.
      */
     boolean hasCredentialsBinary();
     /**
+     * <code>optional bytes credentials_binary = 8;</code>
+     *
+     * <pre>
      * Credentials are not signed - the client can add e.g. his own HDFS tokens.
      * </pre>
-     *
-     * <code>optional bytes credentials_binary = 8;</code>
-     * @return The credentialsBinary.
      */
     com.google.protobuf.ByteString getCredentialsBinary();
+    // optional .FragmentRuntimeInfo fragment_runtime_info = 9;
     /**
+     * <code>optional .FragmentRuntimeInfo fragment_runtime_info = 9;</code>
+     *
+     * <pre>
      * Not supported/honored for external clients right now.
      * </pre>
-     *
-     * <code>optional .FragmentRuntimeInfo fragment_runtime_info = 9;</code>
-     * @return Whether the fragmentRuntimeInfo field is set.
      */
     boolean hasFragmentRuntimeInfo();
     /**
+     * <code>optional .FragmentRuntimeInfo fragment_runtime_info = 9;</code>
+     *
+     * <pre>
      * Not supported/honored for external clients right now.
      * </pre>
-     *
-     * <code>optional .FragmentRuntimeInfo fragment_runtime_info = 9;</code>
-     * @return The fragmentRuntimeInfo.
      */
     org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo getFragmentRuntimeInfo();
     /**
+     * <code>optional .FragmentRuntimeInfo fragment_runtime_info = 9;</code>
+     *
+     * <pre>
      * Not supported/honored for external clients right now.
      * </pre>
-     *
-     * <code>optional .FragmentRuntimeInfo fragment_runtime_info = 9;</code>
      */
     org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfoOrBuilder getFragmentRuntimeInfoOrBuilder();
+    // optional bytes initial_event_bytes = 10;
     /**
+     * <code>optional bytes initial_event_bytes = 10;</code>
+     *
+     * <pre>
      * Serialized (and signed) NotTezEvent; used only for external clients for now.
      * </pre>
-     *
-     * <code>optional bytes initial_event_bytes = 10;</code>
-     * @return Whether the initialEventBytes field is set.
      */
     boolean hasInitialEventBytes();
     /**
+     * <code>optional bytes initial_event_bytes = 10;</code>
+     *
+     * <pre>
      * Serialized (and signed) NotTezEvent; used only for external clients for now.
      * </pre>
- * - * optional bytes initial_event_bytes = 10; - * @return The initialEventBytes. */ com.google.protobuf.ByteString getInitialEventBytes(); + // optional bytes initial_event_signature = 11; /** * optional bytes initial_event_signature = 11; - * @return Whether the initialEventSignature field is set. */ boolean hasInitialEventSignature(); /** * optional bytes initial_event_signature = 11; - * @return The initialEventSignature. */ com.google.protobuf.ByteString getInitialEventSignature(); + // optional bool is_guaranteed = 12 [default = false]; /** * optional bool is_guaranteed = 12 [default = false]; - * @return Whether the isGuaranteed field is set. */ boolean hasIsGuaranteed(); /** * optional bool is_guaranteed = 12 [default = false]; - * @return The isGuaranteed. */ boolean getIsGuaranteed(); + // optional string jwt = 13; /** * optional string jwt = 13; - * @return Whether the jwt field is set. */ boolean hasJwt(); /** * optional string jwt = 13; - * @return The jwt. */ java.lang.String getJwt(); /** * optional string jwt = 13; - * @return The bytes for jwt. */ com.google.protobuf.ByteString getJwtBytes(); + // optional bool is_external_client_request = 14 [default = false]; /** * optional bool is_external_client_request = 14 [default = false]; - * @return Whether the isExternalClientRequest field is set. */ boolean hasIsExternalClientRequest(); /** * optional bool is_external_client_request = 14 [default = false]; - * @return The isExternalClientRequest. */ boolean getIsExternalClientRequest(); } @@ -10964,44 +9731,35 @@ public interface SubmitWorkRequestProtoOrBuilder extends * Protobuf type {@code SubmitWorkRequestProto} */ public static final class SubmitWorkRequestProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:SubmitWorkRequestProto) - SubmitWorkRequestProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements SubmitWorkRequestProtoOrBuilder { // Use SubmitWorkRequestProto.newBuilder() to construct. 
- private SubmitWorkRequestProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private SubmitWorkRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private SubmitWorkRequestProto() { - workSpecSignature_ = com.google.protobuf.ByteString.EMPTY; - containerIdString_ = ""; - amHost_ = ""; - credentialsBinary_ = com.google.protobuf.ByteString.EMPTY; - initialEventBytes_ = com.google.protobuf.ByteString.EMPTY; - initialEventSignature_ = com.google.protobuf.ByteString.EMPTY; - jwt_ = ""; + private SubmitWorkRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SubmitWorkRequestProto defaultInstance; + public static SubmitWorkRequestProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new SubmitWorkRequestProto(); + public SubmitWorkRequestProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private SubmitWorkRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -11013,9 +9771,16 @@ private SubmitWorkRequestProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = workSpec_.toBuilder(); } workSpec_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.PARSER, extensionRegistry); @@ -11042,15 +9807,13 @@ private SubmitWorkRequestProto( break; } case 42: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000010; - containerIdString_ = bs; + containerIdString_ = input.readBytes(); break; } case 50: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000020; - amHost_ = bs; + amHost_ = input.readBytes(); break; } case 56: { @@ -11065,7 +9828,7 @@ private SubmitWorkRequestProto( } case 74: { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.Builder subBuilder = null; - if (((bitField0_ & 0x00000100) != 0)) { + if (((bitField0_ & 0x00000100) == 0x00000100)) { subBuilder = fragmentRuntimeInfo_.toBuilder(); } fragmentRuntimeInfo_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.PARSER, extensionRegistry); @@ -11092,9 +9855,8 @@ private SubmitWorkRequestProto( break; } case 106: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00001000; - jwt_ = bs; + jwt_ = input.readBytes(); break; } case 112: { @@ -11102,22 +9864,13 @@ private SubmitWorkRequestProto( 
isExternalClientRequest_ = input.readBool(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -11128,113 +9881,111 @@ private SubmitWorkRequestProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SubmitWorkRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SubmitWorkRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional .VertexOrBinary work_spec = 1; public static final int WORK_SPEC_FIELD_NUMBER = 1; private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary workSpec_; /** * optional .VertexOrBinary work_spec = 1; - * @return Whether the workSpec field is set. */ - @java.lang.Override public boolean hasWorkSpec() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .VertexOrBinary work_spec = 1; - * @return The workSpec. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary getWorkSpec() { - return workSpec_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.getDefaultInstance() : workSpec_; + return workSpec_; } /** * optional .VertexOrBinary work_spec = 1; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinaryOrBuilder getWorkSpecOrBuilder() { - return workSpec_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.getDefaultInstance() : workSpec_; + return workSpec_; } + // optional bytes work_spec_signature = 2; public static final int WORK_SPEC_SIGNATURE_FIELD_NUMBER = 2; private com.google.protobuf.ByteString workSpecSignature_; /** * optional bytes work_spec_signature = 2; - * @return Whether the workSpecSignature field is set. 
*/ - @java.lang.Override public boolean hasWorkSpecSignature() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bytes work_spec_signature = 2; - * @return The workSpecSignature. */ - @java.lang.Override public com.google.protobuf.ByteString getWorkSpecSignature() { return workSpecSignature_; } + // optional int32 fragment_number = 3; public static final int FRAGMENT_NUMBER_FIELD_NUMBER = 3; private int fragmentNumber_; /** * optional int32 fragment_number = 3; - * @return Whether the fragmentNumber field is set. */ - @java.lang.Override public boolean hasFragmentNumber() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional int32 fragment_number = 3; - * @return The fragmentNumber. */ - @java.lang.Override public int getFragmentNumber() { return fragmentNumber_; } + // optional int32 attempt_number = 4; public static final int ATTEMPT_NUMBER_FIELD_NUMBER = 4; private int attemptNumber_; /** * optional int32 attempt_number = 4; - * @return Whether the attemptNumber field is set. */ - @java.lang.Override public boolean hasAttemptNumber() { - return ((bitField0_ & 0x00000008) != 0); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional int32 attempt_number = 4; - * @return The attemptNumber. */ - @java.lang.Override public int getAttemptNumber() { return attemptNumber_; } + // optional string container_id_string = 5; public static final int CONTAINER_ID_STRING_FIELD_NUMBER = 5; - private volatile java.lang.Object containerIdString_; + private java.lang.Object containerIdString_; /** * optional string container_id_string = 5; - * @return Whether the containerIdString field is set. */ - @java.lang.Override public boolean hasContainerIdString() { - return ((bitField0_ & 0x00000010) != 0); + return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional string container_id_string = 5; - * @return The containerIdString. */ - @java.lang.Override public java.lang.String getContainerIdString() { java.lang.Object ref = containerIdString_; if (ref instanceof java.lang.String) { @@ -11251,9 +10002,7 @@ public java.lang.String getContainerIdString() { } /** * optional string container_id_string = 5; - * @return The bytes for containerIdString. */ - @java.lang.Override public com.google.protobuf.ByteString getContainerIdStringBytes() { java.lang.Object ref = containerIdString_; @@ -11268,21 +10017,18 @@ public java.lang.String getContainerIdString() { } } + // optional string am_host = 6; public static final int AM_HOST_FIELD_NUMBER = 6; - private volatile java.lang.Object amHost_; + private java.lang.Object amHost_; /** * optional string am_host = 6; - * @return Whether the amHost field is set. */ - @java.lang.Override public boolean hasAmHost() { - return ((bitField0_ & 0x00000020) != 0); + return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional string am_host = 6; - * @return The amHost. */ - @java.lang.Override public java.lang.String getAmHost() { java.lang.Object ref = amHost_; if (ref instanceof java.lang.String) { @@ -11299,9 +10045,7 @@ public java.lang.String getAmHost() { } /** * optional string am_host = 6; - * @return The bytes for amHost. 
*/ - @java.lang.Override public com.google.protobuf.ByteString getAmHostBytes() { java.lang.Object ref = amHost_; @@ -11316,170 +10060,148 @@ public java.lang.String getAmHost() { } } + // optional int32 am_port = 7; public static final int AM_PORT_FIELD_NUMBER = 7; private int amPort_; /** * optional int32 am_port = 7; - * @return Whether the amPort field is set. */ - @java.lang.Override public boolean hasAmPort() { - return ((bitField0_ & 0x00000040) != 0); + return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional int32 am_port = 7; - * @return The amPort. */ - @java.lang.Override public int getAmPort() { return amPort_; } + // optional bytes credentials_binary = 8; public static final int CREDENTIALS_BINARY_FIELD_NUMBER = 8; private com.google.protobuf.ByteString credentialsBinary_; /** + * optional bytes credentials_binary = 8; + * *
      * Credentials are not signed - the client can add e.g. his own HDFS tokens.
      * </pre>
-     *
-     * <code>optional bytes credentials_binary = 8;</code>
-     * @return Whether the credentialsBinary field is set.
      */
-    @java.lang.Override
     public boolean hasCredentialsBinary() {
-      return ((bitField0_ & 0x00000080) != 0);
+      return ((bitField0_ & 0x00000080) == 0x00000080);
     }
     /**
+     * <code>optional bytes credentials_binary = 8;</code>
+     *
+     * <pre>
      * Credentials are not signed - the client can add e.g. his own HDFS tokens.
      * </pre>
-     *
-     * <code>optional bytes credentials_binary = 8;</code>
-     * @return The credentialsBinary.
      */
-    @java.lang.Override
     public com.google.protobuf.ByteString getCredentialsBinary() {
       return credentialsBinary_;
     }
+    // optional .FragmentRuntimeInfo fragment_runtime_info = 9;
     public static final int FRAGMENT_RUNTIME_INFO_FIELD_NUMBER = 9;
     private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo fragmentRuntimeInfo_;
     /**
+     * <code>optional .FragmentRuntimeInfo fragment_runtime_info = 9;</code>
+     *
+     * <pre>
      * Not supported/honored for external clients right now.
      * </pre>
-     *
-     * <code>optional .FragmentRuntimeInfo fragment_runtime_info = 9;</code>
-     * @return Whether the fragmentRuntimeInfo field is set.
      */
-    @java.lang.Override
     public boolean hasFragmentRuntimeInfo() {
-      return ((bitField0_ & 0x00000100) != 0);
+      return ((bitField0_ & 0x00000100) == 0x00000100);
     }
     /**
+     * <code>optional .FragmentRuntimeInfo fragment_runtime_info = 9;</code>
+     *
+     * <pre>
      * Not supported/honored for external clients right now.
      * </pre>
-     *
-     * <code>optional .FragmentRuntimeInfo fragment_runtime_info = 9;</code>
-     * @return The fragmentRuntimeInfo.
      */
-    @java.lang.Override
     public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo getFragmentRuntimeInfo() {
-      return fragmentRuntimeInfo_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.getDefaultInstance() : fragmentRuntimeInfo_;
+      return fragmentRuntimeInfo_;
     }
     /**
+     * <code>optional .FragmentRuntimeInfo fragment_runtime_info = 9;</code>
+     *
+     * <pre>
      * Not supported/honored for external clients right now.
      * </pre>
-     *
-     * <code>optional .FragmentRuntimeInfo fragment_runtime_info = 9;</code>
      */
-    @java.lang.Override
     public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfoOrBuilder getFragmentRuntimeInfoOrBuilder() {
-      return fragmentRuntimeInfo_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.getDefaultInstance() : fragmentRuntimeInfo_;
+      return fragmentRuntimeInfo_;
     }
+    // optional bytes initial_event_bytes = 10;
     public static final int INITIAL_EVENT_BYTES_FIELD_NUMBER = 10;
     private com.google.protobuf.ByteString initialEventBytes_;
     /**
+     * <code>optional bytes initial_event_bytes = 10;</code>
+     *
+     * <pre>
      * Serialized (and signed) NotTezEvent; used only for external clients for now.
      * </pre>
-     *
-     * <code>optional bytes initial_event_bytes = 10;</code>
-     * @return Whether the initialEventBytes field is set.
      */
-    @java.lang.Override
     public boolean hasInitialEventBytes() {
-      return ((bitField0_ & 0x00000200) != 0);
+      return ((bitField0_ & 0x00000200) == 0x00000200);
     }
     /**
+     * <code>optional bytes initial_event_bytes = 10;</code>
+     *
+     * <pre>
      * Serialized (and signed) NotTezEvent; used only for external clients for now.
      * </pre>
- * - * optional bytes initial_event_bytes = 10; - * @return The initialEventBytes. */ - @java.lang.Override public com.google.protobuf.ByteString getInitialEventBytes() { return initialEventBytes_; } + // optional bytes initial_event_signature = 11; public static final int INITIAL_EVENT_SIGNATURE_FIELD_NUMBER = 11; private com.google.protobuf.ByteString initialEventSignature_; /** * optional bytes initial_event_signature = 11; - * @return Whether the initialEventSignature field is set. */ - @java.lang.Override public boolean hasInitialEventSignature() { - return ((bitField0_ & 0x00000400) != 0); + return ((bitField0_ & 0x00000400) == 0x00000400); } /** * optional bytes initial_event_signature = 11; - * @return The initialEventSignature. */ - @java.lang.Override public com.google.protobuf.ByteString getInitialEventSignature() { return initialEventSignature_; } + // optional bool is_guaranteed = 12 [default = false]; public static final int IS_GUARANTEED_FIELD_NUMBER = 12; private boolean isGuaranteed_; /** * optional bool is_guaranteed = 12 [default = false]; - * @return Whether the isGuaranteed field is set. */ - @java.lang.Override public boolean hasIsGuaranteed() { - return ((bitField0_ & 0x00000800) != 0); + return ((bitField0_ & 0x00000800) == 0x00000800); } /** * optional bool is_guaranteed = 12 [default = false]; - * @return The isGuaranteed. */ - @java.lang.Override public boolean getIsGuaranteed() { return isGuaranteed_; } + // optional string jwt = 13; public static final int JWT_FIELD_NUMBER = 13; - private volatile java.lang.Object jwt_; + private java.lang.Object jwt_; /** * optional string jwt = 13; - * @return Whether the jwt field is set. */ - @java.lang.Override public boolean hasJwt() { - return ((bitField0_ & 0x00001000) != 0); + return ((bitField0_ & 0x00001000) == 0x00001000); } /** * optional string jwt = 13; - * @return The jwt. */ - @java.lang.Override public java.lang.String getJwt() { java.lang.Object ref = jwt_; if (ref instanceof java.lang.String) { @@ -11496,9 +10218,7 @@ public java.lang.String getJwt() { } /** * optional string jwt = 13; - * @return The bytes for jwt. */ - @java.lang.Override public com.google.protobuf.ByteString getJwtBytes() { java.lang.Object ref = jwt_; @@ -11513,148 +10233,169 @@ public java.lang.String getJwt() { } } + // optional bool is_external_client_request = 14 [default = false]; public static final int IS_EXTERNAL_CLIENT_REQUEST_FIELD_NUMBER = 14; private boolean isExternalClientRequest_; /** * optional bool is_external_client_request = 14 [default = false]; - * @return Whether the isExternalClientRequest field is set. */ - @java.lang.Override public boolean hasIsExternalClientRequest() { - return ((bitField0_ & 0x00002000) != 0); + return ((bitField0_ & 0x00002000) == 0x00002000); } /** * optional bool is_external_client_request = 14 [default = false]; - * @return The isExternalClientRequest. 
*/ - @java.lang.Override public boolean getIsExternalClientRequest() { return isExternalClientRequest_; } + private void initFields() { + workSpec_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.getDefaultInstance(); + workSpecSignature_ = com.google.protobuf.ByteString.EMPTY; + fragmentNumber_ = 0; + attemptNumber_ = 0; + containerIdString_ = ""; + amHost_ = ""; + amPort_ = 0; + credentialsBinary_ = com.google.protobuf.ByteString.EMPTY; + fragmentRuntimeInfo_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.getDefaultInstance(); + initialEventBytes_ = com.google.protobuf.ByteString.EMPTY; + initialEventSignature_ = com.google.protobuf.ByteString.EMPTY; + isGuaranteed_ = false; + jwt_ = ""; + isExternalClientRequest_ = false; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - output.writeMessage(1, getWorkSpec()); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, workSpec_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, workSpecSignature_); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeInt32(3, fragmentNumber_); } - if (((bitField0_ & 0x00000008) != 0)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeInt32(4, attemptNumber_); } - if (((bitField0_ & 0x00000010) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 5, containerIdString_); + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBytes(5, getContainerIdStringBytes()); } - if (((bitField0_ & 0x00000020) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 6, amHost_); + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBytes(6, getAmHostBytes()); } - if (((bitField0_ & 0x00000040) != 0)) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeInt32(7, amPort_); } - if (((bitField0_ & 0x00000080) != 0)) { + if (((bitField0_ & 0x00000080) == 0x00000080)) { output.writeBytes(8, credentialsBinary_); } - if (((bitField0_ & 0x00000100) != 0)) { - output.writeMessage(9, getFragmentRuntimeInfo()); + if (((bitField0_ & 0x00000100) == 0x00000100)) { + output.writeMessage(9, fragmentRuntimeInfo_); } - if (((bitField0_ & 0x00000200) != 0)) { + if (((bitField0_ & 0x00000200) == 0x00000200)) { output.writeBytes(10, initialEventBytes_); } - if (((bitField0_ & 0x00000400) != 0)) { + if (((bitField0_ & 0x00000400) == 0x00000400)) { output.writeBytes(11, initialEventSignature_); } - if (((bitField0_ & 0x00000800) != 0)) { + if (((bitField0_ & 0x00000800) == 0x00000800)) { output.writeBool(12, isGuaranteed_); } - if (((bitField0_ & 0x00001000) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 13, jwt_); + if (((bitField0_ & 0x00001000) == 0x00001000)) { + output.writeBytes(13, getJwtBytes()); } - if (((bitField0_ & 0x00002000) != 0)) { + if (((bitField0_ & 0x00002000) == 0x00002000)) { output.writeBool(14, isExternalClientRequest_); } - unknownFields.writeTo(output); + 
getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, getWorkSpec()); + .computeMessageSize(1, workSpec_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, workSpecSignature_); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(3, fragmentNumber_); } - if (((bitField0_ & 0x00000008) != 0)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(4, attemptNumber_); } - if (((bitField0_ & 0x00000010) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, containerIdString_); + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, getContainerIdStringBytes()); } - if (((bitField0_ & 0x00000020) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, amHost_); + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(6, getAmHostBytes()); } - if (((bitField0_ & 0x00000040) != 0)) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(7, amPort_); } - if (((bitField0_ & 0x00000080) != 0)) { + if (((bitField0_ & 0x00000080) == 0x00000080)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(8, credentialsBinary_); } - if (((bitField0_ & 0x00000100) != 0)) { + if (((bitField0_ & 0x00000100) == 0x00000100)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(9, getFragmentRuntimeInfo()); + .computeMessageSize(9, fragmentRuntimeInfo_); } - if (((bitField0_ & 0x00000200) != 0)) { + if (((bitField0_ & 0x00000200) == 0x00000200)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(10, initialEventBytes_); } - if (((bitField0_ & 0x00000400) != 0)) { + if (((bitField0_ & 0x00000400) == 0x00000400)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(11, initialEventSignature_); } - if (((bitField0_ & 0x00000800) != 0)) { + if (((bitField0_ & 0x00000800) == 0x00000800)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(12, isGuaranteed_); } - if (((bitField0_ & 0x00001000) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(13, jwt_); + if (((bitField0_ & 0x00001000) == 0x00001000)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(13, getJwtBytes()); } - if (((bitField0_ & 0x00002000) != 0)) { + if (((bitField0_ & 0x00002000) == 0x00002000)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(14, isExternalClientRequest_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean 
equals(final java.lang.Object obj) { if (obj == this) { @@ -11665,87 +10406,90 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto) obj; - if (hasWorkSpec() != other.hasWorkSpec()) return false; + boolean result = true; + result = result && (hasWorkSpec() == other.hasWorkSpec()); if (hasWorkSpec()) { - if (!getWorkSpec() - .equals(other.getWorkSpec())) return false; + result = result && getWorkSpec() + .equals(other.getWorkSpec()); } - if (hasWorkSpecSignature() != other.hasWorkSpecSignature()) return false; + result = result && (hasWorkSpecSignature() == other.hasWorkSpecSignature()); if (hasWorkSpecSignature()) { - if (!getWorkSpecSignature() - .equals(other.getWorkSpecSignature())) return false; + result = result && getWorkSpecSignature() + .equals(other.getWorkSpecSignature()); } - if (hasFragmentNumber() != other.hasFragmentNumber()) return false; + result = result && (hasFragmentNumber() == other.hasFragmentNumber()); if (hasFragmentNumber()) { - if (getFragmentNumber() - != other.getFragmentNumber()) return false; + result = result && (getFragmentNumber() + == other.getFragmentNumber()); } - if (hasAttemptNumber() != other.hasAttemptNumber()) return false; + result = result && (hasAttemptNumber() == other.hasAttemptNumber()); if (hasAttemptNumber()) { - if (getAttemptNumber() - != other.getAttemptNumber()) return false; + result = result && (getAttemptNumber() + == other.getAttemptNumber()); } - if (hasContainerIdString() != other.hasContainerIdString()) return false; + result = result && (hasContainerIdString() == other.hasContainerIdString()); if (hasContainerIdString()) { - if (!getContainerIdString() - .equals(other.getContainerIdString())) return false; + result = result && getContainerIdString() + .equals(other.getContainerIdString()); } - if (hasAmHost() != other.hasAmHost()) return false; + result = result && (hasAmHost() == other.hasAmHost()); if (hasAmHost()) { - if (!getAmHost() - .equals(other.getAmHost())) return false; + result = result && getAmHost() + .equals(other.getAmHost()); } - if (hasAmPort() != other.hasAmPort()) return false; + result = result && (hasAmPort() == other.hasAmPort()); if (hasAmPort()) { - if (getAmPort() - != other.getAmPort()) return false; + result = result && (getAmPort() + == other.getAmPort()); } - if (hasCredentialsBinary() != other.hasCredentialsBinary()) return false; + result = result && (hasCredentialsBinary() == other.hasCredentialsBinary()); if (hasCredentialsBinary()) { - if (!getCredentialsBinary() - .equals(other.getCredentialsBinary())) return false; + result = result && getCredentialsBinary() + .equals(other.getCredentialsBinary()); } - if (hasFragmentRuntimeInfo() != other.hasFragmentRuntimeInfo()) return false; + result = result && (hasFragmentRuntimeInfo() == other.hasFragmentRuntimeInfo()); if (hasFragmentRuntimeInfo()) { - if (!getFragmentRuntimeInfo() - .equals(other.getFragmentRuntimeInfo())) return false; + result = result && getFragmentRuntimeInfo() + .equals(other.getFragmentRuntimeInfo()); } - if (hasInitialEventBytes() != other.hasInitialEventBytes()) return false; + result = result && (hasInitialEventBytes() == other.hasInitialEventBytes()); if (hasInitialEventBytes()) { - if (!getInitialEventBytes() - .equals(other.getInitialEventBytes())) return false; + result = result && getInitialEventBytes() + 
.equals(other.getInitialEventBytes()); } - if (hasInitialEventSignature() != other.hasInitialEventSignature()) return false; + result = result && (hasInitialEventSignature() == other.hasInitialEventSignature()); if (hasInitialEventSignature()) { - if (!getInitialEventSignature() - .equals(other.getInitialEventSignature())) return false; + result = result && getInitialEventSignature() + .equals(other.getInitialEventSignature()); } - if (hasIsGuaranteed() != other.hasIsGuaranteed()) return false; + result = result && (hasIsGuaranteed() == other.hasIsGuaranteed()); if (hasIsGuaranteed()) { - if (getIsGuaranteed() - != other.getIsGuaranteed()) return false; + result = result && (getIsGuaranteed() + == other.getIsGuaranteed()); } - if (hasJwt() != other.hasJwt()) return false; + result = result && (hasJwt() == other.hasJwt()); if (hasJwt()) { - if (!getJwt() - .equals(other.getJwt())) return false; + result = result && getJwt() + .equals(other.getJwt()); } - if (hasIsExternalClientRequest() != other.hasIsExternalClientRequest()) return false; + result = result && (hasIsExternalClientRequest() == other.hasIsExternalClientRequest()); if (hasIsExternalClientRequest()) { - if (getIsExternalClientRequest() - != other.getIsExternalClientRequest()) return false; + result = result && (getIsExternalClientRequest() + == other.getIsExternalClientRequest()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasWorkSpec()) { hash = (37 * hash) + WORK_SPEC_FIELD_NUMBER; hash = (53 * hash) + getWorkSpec().hashCode(); @@ -11792,8 +10536,7 @@ public int hashCode() { } if (hasIsGuaranteed()) { hash = (37 * hash) + IS_GUARANTEED_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( - getIsGuaranteed()); + hash = (53 * hash) + hashBoolean(getIsGuaranteed()); } if (hasJwt()) { hash = (37 * hash) + JWT_FIELD_NUMBER; @@ -11801,25 +10544,13 @@ public int hashCode() { } if (hasIsExternalClientRequest()) { hash = (37 * hash) + IS_EXTERNAL_CLIENT_REQUEST_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( - getIsExternalClientRequest()); + hash = (53 * hash) + hashBoolean(getIsExternalClientRequest()); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -11843,59 +10574,46 @@ public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Su } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -11903,16 +10621,14 @@ protected Builder newBuilderForType( * Protobuf type {@code SubmitWorkRequestProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:SubmitWorkRequestProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -11925,22 +10641,24 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getWorkSpecFieldBuilder(); getFragmentRuntimeInfoFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); if (workSpecBuilder_ == null) { - workSpec_ = null; + workSpec_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.getDefaultInstance(); } else { workSpecBuilder_.clear(); } @@ -11960,7 +10678,7 @@ public Builder clear() { credentialsBinary_ = com.google.protobuf.ByteString.EMPTY; bitField0_ = (bitField0_ & ~0x00000080); if (fragmentRuntimeInfoBuilder_ == null) { - fragmentRuntimeInfo_ = null; + fragmentRuntimeInfo_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.getDefaultInstance(); } else { fragmentRuntimeInfoBuilder_.clear(); } @@ -11978,18 +10696,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkRequestProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto build() { 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto result = buildPartial(); if (!result.isInitialized()) { @@ -11998,113 +10717,79 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWor return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { - if (workSpecBuilder_ == null) { - result.workSpec_ = workSpec_; - } else { - result.workSpec_ = workSpecBuilder_.build(); - } + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (((from_bitField0_ & 0x00000002) != 0)) { + if (workSpecBuilder_ == null) { + result.workSpec_ = workSpec_; + } else { + result.workSpec_ = workSpecBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.workSpecSignature_ = workSpecSignature_; - if (((from_bitField0_ & 0x00000004) != 0)) { - result.fragmentNumber_ = fragmentNumber_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } - if (((from_bitField0_ & 0x00000008) != 0)) { - result.attemptNumber_ = attemptNumber_; + result.fragmentNumber_ = fragmentNumber_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } - if (((from_bitField0_ & 0x00000010) != 0)) { + result.attemptNumber_ = attemptNumber_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.containerIdString_ = containerIdString_; - if (((from_bitField0_ & 0x00000020) != 0)) { + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.amHost_ = amHost_; - if (((from_bitField0_ & 0x00000040) != 0)) { - result.amPort_ = amPort_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { to_bitField0_ |= 0x00000040; } - if (((from_bitField0_ & 0x00000080) != 0)) { + result.amPort_ = amPort_; + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { to_bitField0_ |= 0x00000080; } result.credentialsBinary_ = credentialsBinary_; - if (((from_bitField0_ & 0x00000100) != 0)) { - if (fragmentRuntimeInfoBuilder_ == null) { - result.fragmentRuntimeInfo_ = fragmentRuntimeInfo_; - } else { - result.fragmentRuntimeInfo_ = fragmentRuntimeInfoBuilder_.build(); - } + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { to_bitField0_ |= 0x00000100; } - if (((from_bitField0_ & 0x00000200) != 0)) { + if (fragmentRuntimeInfoBuilder_ == null) { + result.fragmentRuntimeInfo_ = fragmentRuntimeInfo_; + } else { + result.fragmentRuntimeInfo_ = fragmentRuntimeInfoBuilder_.build(); + } + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { to_bitField0_ |= 0x00000200; } result.initialEventBytes_ = initialEventBytes_; - if (((from_bitField0_ & 0x00000400) != 0)) { + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { to_bitField0_ |= 0x00000400; } result.initialEventSignature_ = initialEventSignature_; - if (((from_bitField0_ & 0x00000800) != 0)) { - result.isGuaranteed_ = isGuaranteed_; + if (((from_bitField0_ & 0x00000800) == 0x00000800)) { to_bitField0_ |= 0x00000800; } - if (((from_bitField0_ & 0x00001000) != 0)) { + result.isGuaranteed_ = isGuaranteed_; + if (((from_bitField0_ & 0x00001000) == 
0x00001000)) { to_bitField0_ |= 0x00001000; } result.jwt_ = jwt_; - if (((from_bitField0_ & 0x00002000) != 0)) { - result.isExternalClientRequest_ = isExternalClientRequest_; + if (((from_bitField0_ & 0x00002000) == 0x00002000)) { to_bitField0_ |= 0x00002000; } + result.isExternalClientRequest_ = isExternalClientRequest_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto)other); @@ -12164,17 +10849,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasIsExternalClientRequest()) { setIsExternalClientRequest(other.getIsExternalClientRequest()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -12184,7 +10866,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -12194,23 +10876,22 @@ public Builder mergeFrom( } private int bitField0_; - private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary workSpec_; - private com.google.protobuf.SingleFieldBuilderV3< + // optional .VertexOrBinary work_spec = 1; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary workSpec_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinaryOrBuilder> workSpecBuilder_; /** * optional .VertexOrBinary work_spec = 1; - * @return Whether the workSpec field is set. 
*/ public boolean hasWorkSpec() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .VertexOrBinary work_spec = 1; - * @return The workSpec. */ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary getWorkSpec() { if (workSpecBuilder_ == null) { - return workSpec_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.getDefaultInstance() : workSpec_; + return workSpec_; } else { return workSpecBuilder_.getMessage(); } @@ -12250,8 +10931,7 @@ public Builder setWorkSpec( */ public Builder mergeWorkSpec(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary value) { if (workSpecBuilder_ == null) { - if (((bitField0_ & 0x00000001) != 0) && - workSpec_ != null && + if (((bitField0_ & 0x00000001) == 0x00000001) && workSpec_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.getDefaultInstance()) { workSpec_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.newBuilder(workSpec_).mergeFrom(value).buildPartial(); @@ -12270,7 +10950,7 @@ public Builder mergeWorkSpec(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonPr */ public Builder clearWorkSpec() { if (workSpecBuilder_ == null) { - workSpec_ = null; + workSpec_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.getDefaultInstance(); onChanged(); } else { workSpecBuilder_.clear(); @@ -12293,20 +10973,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrB if (workSpecBuilder_ != null) { return workSpecBuilder_.getMessageOrBuilder(); } else { - return workSpec_ == null ? - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.getDefaultInstance() : workSpec_; + return workSpec_; } } /** * optional .VertexOrBinary work_spec = 1; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinaryOrBuilder> getWorkSpecFieldBuilder() { if (workSpecBuilder_ == null) { - workSpecBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + workSpecBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinary.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrBinaryOrBuilder>( - getWorkSpec(), + workSpec_, getParentForChildren(), isClean()); workSpec_ = null; @@ -12314,27 +10993,22 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.VertexOrB return workSpecBuilder_; } + // optional bytes work_spec_signature = 2; private com.google.protobuf.ByteString workSpecSignature_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes work_spec_signature = 2; - * @return Whether the workSpecSignature field is set. */ - @java.lang.Override public boolean hasWorkSpecSignature() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bytes work_spec_signature = 2; - * @return The workSpecSignature. 
*/ - @java.lang.Override public com.google.protobuf.ByteString getWorkSpecSignature() { return workSpecSignature_; } /** * optional bytes work_spec_signature = 2; - * @param value The workSpecSignature to set. - * @return This builder for chaining. */ public Builder setWorkSpecSignature(com.google.protobuf.ByteString value) { if (value == null) { @@ -12347,7 +11021,6 @@ public Builder setWorkSpecSignature(com.google.protobuf.ByteString value) { } /** * optional bytes work_spec_signature = 2; - * @return This builder for chaining. */ public Builder clearWorkSpecSignature() { bitField0_ = (bitField0_ & ~0x00000002); @@ -12356,27 +11029,22 @@ public Builder clearWorkSpecSignature() { return this; } + // optional int32 fragment_number = 3; private int fragmentNumber_ ; /** * optional int32 fragment_number = 3; - * @return Whether the fragmentNumber field is set. */ - @java.lang.Override public boolean hasFragmentNumber() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional int32 fragment_number = 3; - * @return The fragmentNumber. */ - @java.lang.Override public int getFragmentNumber() { return fragmentNumber_; } /** * optional int32 fragment_number = 3; - * @param value The fragmentNumber to set. - * @return This builder for chaining. */ public Builder setFragmentNumber(int value) { bitField0_ |= 0x00000004; @@ -12386,7 +11054,6 @@ public Builder setFragmentNumber(int value) { } /** * optional int32 fragment_number = 3; - * @return This builder for chaining. */ public Builder clearFragmentNumber() { bitField0_ = (bitField0_ & ~0x00000004); @@ -12395,27 +11062,22 @@ public Builder clearFragmentNumber() { return this; } + // optional int32 attempt_number = 4; private int attemptNumber_ ; /** * optional int32 attempt_number = 4; - * @return Whether the attemptNumber field is set. */ - @java.lang.Override public boolean hasAttemptNumber() { - return ((bitField0_ & 0x00000008) != 0); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional int32 attempt_number = 4; - * @return The attemptNumber. */ - @java.lang.Override public int getAttemptNumber() { return attemptNumber_; } /** * optional int32 attempt_number = 4; - * @param value The attemptNumber to set. - * @return This builder for chaining. */ public Builder setAttemptNumber(int value) { bitField0_ |= 0x00000008; @@ -12425,7 +11087,6 @@ public Builder setAttemptNumber(int value) { } /** * optional int32 attempt_number = 4; - * @return This builder for chaining. */ public Builder clearAttemptNumber() { bitField0_ = (bitField0_ & ~0x00000008); @@ -12434,27 +11095,23 @@ public Builder clearAttemptNumber() { return this; } + // optional string container_id_string = 5; private java.lang.Object containerIdString_ = ""; /** * optional string container_id_string = 5; - * @return Whether the containerIdString field is set. */ public boolean hasContainerIdString() { - return ((bitField0_ & 0x00000010) != 0); + return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional string container_id_string = 5; - * @return The containerIdString. 
*/ public java.lang.String getContainerIdString() { java.lang.Object ref = containerIdString_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - containerIdString_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + containerIdString_ = s; return s; } else { return (java.lang.String) ref; @@ -12462,7 +11119,6 @@ public java.lang.String getContainerIdString() { } /** * optional string container_id_string = 5; - * @return The bytes for containerIdString. */ public com.google.protobuf.ByteString getContainerIdStringBytes() { @@ -12479,8 +11135,6 @@ public java.lang.String getContainerIdString() { } /** * optional string container_id_string = 5; - * @param value The containerIdString to set. - * @return This builder for chaining. */ public Builder setContainerIdString( java.lang.String value) { @@ -12494,7 +11148,6 @@ public Builder setContainerIdString( } /** * optional string container_id_string = 5; - * @return This builder for chaining. */ public Builder clearContainerIdString() { bitField0_ = (bitField0_ & ~0x00000010); @@ -12504,8 +11157,6 @@ public Builder clearContainerIdString() { } /** * optional string container_id_string = 5; - * @param value The bytes for containerIdString to set. - * @return This builder for chaining. */ public Builder setContainerIdStringBytes( com.google.protobuf.ByteString value) { @@ -12518,27 +11169,23 @@ public Builder setContainerIdStringBytes( return this; } + // optional string am_host = 6; private java.lang.Object amHost_ = ""; /** * optional string am_host = 6; - * @return Whether the amHost field is set. */ public boolean hasAmHost() { - return ((bitField0_ & 0x00000020) != 0); + return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional string am_host = 6; - * @return The amHost. */ public java.lang.String getAmHost() { java.lang.Object ref = amHost_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - amHost_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + amHost_ = s; return s; } else { return (java.lang.String) ref; @@ -12546,7 +11193,6 @@ public java.lang.String getAmHost() { } /** * optional string am_host = 6; - * @return The bytes for amHost. */ public com.google.protobuf.ByteString getAmHostBytes() { @@ -12563,8 +11209,6 @@ public java.lang.String getAmHost() { } /** * optional string am_host = 6; - * @param value The amHost to set. - * @return This builder for chaining. */ public Builder setAmHost( java.lang.String value) { @@ -12578,7 +11222,6 @@ public Builder setAmHost( } /** * optional string am_host = 6; - * @return This builder for chaining. */ public Builder clearAmHost() { bitField0_ = (bitField0_ & ~0x00000020); @@ -12588,8 +11231,6 @@ public Builder clearAmHost() { } /** * optional string am_host = 6; - * @param value The bytes for amHost to set. - * @return This builder for chaining. */ public Builder setAmHostBytes( com.google.protobuf.ByteString value) { @@ -12602,27 +11243,22 @@ public Builder setAmHostBytes( return this; } + // optional int32 am_port = 7; private int amPort_ ; /** * optional int32 am_port = 7; - * @return Whether the amPort field is set. 
*/ - @java.lang.Override public boolean hasAmPort() { - return ((bitField0_ & 0x00000040) != 0); + return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional int32 am_port = 7; - * @return The amPort. */ - @java.lang.Override public int getAmPort() { return amPort_; } /** * optional int32 am_port = 7; - * @param value The amPort to set. - * @return This builder for chaining. */ public Builder setAmPort(int value) { bitField0_ |= 0x00000040; @@ -12632,7 +11268,6 @@ public Builder setAmPort(int value) { } /** * optional int32 am_port = 7; - * @return This builder for chaining. */ public Builder clearAmPort() { bitField0_ = (bitField0_ & ~0x00000040); @@ -12641,39 +11276,34 @@ public Builder clearAmPort() { return this; } + // optional bytes credentials_binary = 8; private com.google.protobuf.ByteString credentialsBinary_ = com.google.protobuf.ByteString.EMPTY; /** + * optional bytes credentials_binary = 8; + * *
        * Credentials are not signed - the client can add e.g. his own HDFS tokens.
        * </pre>
- * - * optional bytes credentials_binary = 8; - * @return Whether the credentialsBinary field is set. */ - @java.lang.Override public boolean hasCredentialsBinary() { - return ((bitField0_ & 0x00000080) != 0); + return ((bitField0_ & 0x00000080) == 0x00000080); } /** + * optional bytes credentials_binary = 8; + * *
        * Credentials are not signed - the client can add e.g. his own HDFS tokens.
        * </pre>
- * - * optional bytes credentials_binary = 8; - * @return The credentialsBinary. */ - @java.lang.Override public com.google.protobuf.ByteString getCredentialsBinary() { return credentialsBinary_; } /** + * optional bytes credentials_binary = 8; + * *
        * Credentials are not signed - the client can add e.g. his own HDFS tokens.
        * </pre>
- * - * optional bytes credentials_binary = 8; - * @param value The credentialsBinary to set. - * @return This builder for chaining. */ public Builder setCredentialsBinary(com.google.protobuf.ByteString value) { if (value == null) { @@ -12685,12 +11315,11 @@ public Builder setCredentialsBinary(com.google.protobuf.ByteString value) { return this; } /** + * optional bytes credentials_binary = 8; + * *
        * Credentials are not signed - the client can add e.g. his own HDFS tokens.
        * </pre>
- * - * optional bytes credentials_binary = 8; - * @return This builder for chaining. */ public Builder clearCredentialsBinary() { bitField0_ = (bitField0_ & ~0x00000080); @@ -12699,41 +11328,40 @@ public Builder clearCredentialsBinary() { return this; } - private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo fragmentRuntimeInfo_; - private com.google.protobuf.SingleFieldBuilderV3< + // optional .FragmentRuntimeInfo fragment_runtime_info = 9; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo fragmentRuntimeInfo_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfoOrBuilder> fragmentRuntimeInfoBuilder_; /** + * optional .FragmentRuntimeInfo fragment_runtime_info = 9; + * *
        * Not supported/honored for external clients right now.
        * </pre>
- * - * optional .FragmentRuntimeInfo fragment_runtime_info = 9; - * @return Whether the fragmentRuntimeInfo field is set. */ public boolean hasFragmentRuntimeInfo() { - return ((bitField0_ & 0x00000100) != 0); + return ((bitField0_ & 0x00000100) == 0x00000100); } /** + * optional .FragmentRuntimeInfo fragment_runtime_info = 9; + * *
        * Not supported/honored for external clients right now.
        * </pre>
- * - * optional .FragmentRuntimeInfo fragment_runtime_info = 9; - * @return The fragmentRuntimeInfo. */ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo getFragmentRuntimeInfo() { if (fragmentRuntimeInfoBuilder_ == null) { - return fragmentRuntimeInfo_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.getDefaultInstance() : fragmentRuntimeInfo_; + return fragmentRuntimeInfo_; } else { return fragmentRuntimeInfoBuilder_.getMessage(); } } /** + * optional .FragmentRuntimeInfo fragment_runtime_info = 9; + * *
        * Not supported/honored for external clients right now.
        * </pre>
- * - * optional .FragmentRuntimeInfo fragment_runtime_info = 9; */ public Builder setFragmentRuntimeInfo(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo value) { if (fragmentRuntimeInfoBuilder_ == null) { @@ -12749,11 +11377,11 @@ public Builder setFragmentRuntimeInfo(org.apache.hadoop.hive.llap.daemon.rpc.Lla return this; } /** + * optional .FragmentRuntimeInfo fragment_runtime_info = 9; + * *
        * Not supported/honored for external clients right now.
        * </pre>
- * - * optional .FragmentRuntimeInfo fragment_runtime_info = 9; */ public Builder setFragmentRuntimeInfo( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.Builder builderForValue) { @@ -12767,16 +11395,15 @@ public Builder setFragmentRuntimeInfo( return this; } /** + * optional .FragmentRuntimeInfo fragment_runtime_info = 9; + * *
        * Not supported/honored for external clients right now.
        * </pre>
- * - * optional .FragmentRuntimeInfo fragment_runtime_info = 9; */ public Builder mergeFragmentRuntimeInfo(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo value) { if (fragmentRuntimeInfoBuilder_ == null) { - if (((bitField0_ & 0x00000100) != 0) && - fragmentRuntimeInfo_ != null && + if (((bitField0_ & 0x00000100) == 0x00000100) && fragmentRuntimeInfo_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.getDefaultInstance()) { fragmentRuntimeInfo_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.newBuilder(fragmentRuntimeInfo_).mergeFrom(value).buildPartial(); @@ -12791,15 +11418,15 @@ public Builder mergeFragmentRuntimeInfo(org.apache.hadoop.hive.llap.daemon.rpc.L return this; } /** + * optional .FragmentRuntimeInfo fragment_runtime_info = 9; + * *
        * Not supported/honored for external clients right now.
        * </pre>
- * - * optional .FragmentRuntimeInfo fragment_runtime_info = 9; */ public Builder clearFragmentRuntimeInfo() { if (fragmentRuntimeInfoBuilder_ == null) { - fragmentRuntimeInfo_ = null; + fragmentRuntimeInfo_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.getDefaultInstance(); onChanged(); } else { fragmentRuntimeInfoBuilder_.clear(); @@ -12808,11 +11435,11 @@ public Builder clearFragmentRuntimeInfo() { return this; } /** + * optional .FragmentRuntimeInfo fragment_runtime_info = 9; + * *
        * Not supported/honored for external clients right now.
        * </pre>
- * - * optional .FragmentRuntimeInfo fragment_runtime_info = 9; */ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.Builder getFragmentRuntimeInfoBuilder() { bitField0_ |= 0x00000100; @@ -12820,34 +11447,33 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentR return getFragmentRuntimeInfoFieldBuilder().getBuilder(); } /** + * optional .FragmentRuntimeInfo fragment_runtime_info = 9; + * *
        * Not supported/honored for external clients right now.
        * </pre>
- * - * optional .FragmentRuntimeInfo fragment_runtime_info = 9; */ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfoOrBuilder getFragmentRuntimeInfoOrBuilder() { if (fragmentRuntimeInfoBuilder_ != null) { return fragmentRuntimeInfoBuilder_.getMessageOrBuilder(); } else { - return fragmentRuntimeInfo_ == null ? - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.getDefaultInstance() : fragmentRuntimeInfo_; + return fragmentRuntimeInfo_; } } /** + * optional .FragmentRuntimeInfo fragment_runtime_info = 9; + * *
        * Not supported/honored for external clients right now.
        * </pre>
- * - * optional .FragmentRuntimeInfo fragment_runtime_info = 9; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfoOrBuilder> getFragmentRuntimeInfoFieldBuilder() { if (fragmentRuntimeInfoBuilder_ == null) { - fragmentRuntimeInfoBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + fragmentRuntimeInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfo.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentRuntimeInfoOrBuilder>( - getFragmentRuntimeInfo(), + fragmentRuntimeInfo_, getParentForChildren(), isClean()); fragmentRuntimeInfo_ = null; @@ -12855,39 +11481,34 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.FragmentR return fragmentRuntimeInfoBuilder_; } + // optional bytes initial_event_bytes = 10; private com.google.protobuf.ByteString initialEventBytes_ = com.google.protobuf.ByteString.EMPTY; /** + * optional bytes initial_event_bytes = 10; + * *
        * Serialized (and signed) NotTezEvent; used only for external clients for now.
        * </pre>
- * - * optional bytes initial_event_bytes = 10; - * @return Whether the initialEventBytes field is set. */ - @java.lang.Override public boolean hasInitialEventBytes() { - return ((bitField0_ & 0x00000200) != 0); + return ((bitField0_ & 0x00000200) == 0x00000200); } /** + * optional bytes initial_event_bytes = 10; + * *
        * Serialized (and signed) NotTezEvent; used only for external clients for now.
        * </pre>
- * - * optional bytes initial_event_bytes = 10; - * @return The initialEventBytes. */ - @java.lang.Override public com.google.protobuf.ByteString getInitialEventBytes() { return initialEventBytes_; } /** + * optional bytes initial_event_bytes = 10; + * *
        * Serialized (and signed) NotTezEvent; used only for external clients for now.
        * </pre>
- * - * optional bytes initial_event_bytes = 10; - * @param value The initialEventBytes to set. - * @return This builder for chaining. */ public Builder setInitialEventBytes(com.google.protobuf.ByteString value) { if (value == null) { @@ -12899,12 +11520,11 @@ public Builder setInitialEventBytes(com.google.protobuf.ByteString value) { return this; } /** + * optional bytes initial_event_bytes = 10; + * *
        * Serialized (and signed) NotTezEvent; used only for external clients for now.
        * </pre>
- * - * optional bytes initial_event_bytes = 10; - * @return This builder for chaining. */ public Builder clearInitialEventBytes() { bitField0_ = (bitField0_ & ~0x00000200); @@ -12913,27 +11533,22 @@ public Builder clearInitialEventBytes() { return this; } + // optional bytes initial_event_signature = 11; private com.google.protobuf.ByteString initialEventSignature_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes initial_event_signature = 11; - * @return Whether the initialEventSignature field is set. */ - @java.lang.Override public boolean hasInitialEventSignature() { - return ((bitField0_ & 0x00000400) != 0); + return ((bitField0_ & 0x00000400) == 0x00000400); } /** * optional bytes initial_event_signature = 11; - * @return The initialEventSignature. */ - @java.lang.Override public com.google.protobuf.ByteString getInitialEventSignature() { return initialEventSignature_; } /** * optional bytes initial_event_signature = 11; - * @param value The initialEventSignature to set. - * @return This builder for chaining. */ public Builder setInitialEventSignature(com.google.protobuf.ByteString value) { if (value == null) { @@ -12946,7 +11561,6 @@ public Builder setInitialEventSignature(com.google.protobuf.ByteString value) { } /** * optional bytes initial_event_signature = 11; - * @return This builder for chaining. */ public Builder clearInitialEventSignature() { bitField0_ = (bitField0_ & ~0x00000400); @@ -12955,27 +11569,22 @@ public Builder clearInitialEventSignature() { return this; } + // optional bool is_guaranteed = 12 [default = false]; private boolean isGuaranteed_ ; /** * optional bool is_guaranteed = 12 [default = false]; - * @return Whether the isGuaranteed field is set. */ - @java.lang.Override public boolean hasIsGuaranteed() { - return ((bitField0_ & 0x00000800) != 0); + return ((bitField0_ & 0x00000800) == 0x00000800); } /** * optional bool is_guaranteed = 12 [default = false]; - * @return The isGuaranteed. */ - @java.lang.Override public boolean getIsGuaranteed() { return isGuaranteed_; } /** * optional bool is_guaranteed = 12 [default = false]; - * @param value The isGuaranteed to set. - * @return This builder for chaining. */ public Builder setIsGuaranteed(boolean value) { bitField0_ |= 0x00000800; @@ -12985,7 +11594,6 @@ public Builder setIsGuaranteed(boolean value) { } /** * optional bool is_guaranteed = 12 [default = false]; - * @return This builder for chaining. */ public Builder clearIsGuaranteed() { bitField0_ = (bitField0_ & ~0x00000800); @@ -12994,27 +11602,23 @@ public Builder clearIsGuaranteed() { return this; } + // optional string jwt = 13; private java.lang.Object jwt_ = ""; /** * optional string jwt = 13; - * @return Whether the jwt field is set. */ public boolean hasJwt() { - return ((bitField0_ & 0x00001000) != 0); + return ((bitField0_ & 0x00001000) == 0x00001000); } /** * optional string jwt = 13; - * @return The jwt. */ public java.lang.String getJwt() { java.lang.Object ref = jwt_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - jwt_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + jwt_ = s; return s; } else { return (java.lang.String) ref; @@ -13022,7 +11626,6 @@ public java.lang.String getJwt() { } /** * optional string jwt = 13; - * @return The bytes for jwt. 
*/ public com.google.protobuf.ByteString getJwtBytes() { @@ -13039,8 +11642,6 @@ public java.lang.String getJwt() { } /** * optional string jwt = 13; - * @param value The jwt to set. - * @return This builder for chaining. */ public Builder setJwt( java.lang.String value) { @@ -13054,7 +11655,6 @@ public Builder setJwt( } /** * optional string jwt = 13; - * @return This builder for chaining. */ public Builder clearJwt() { bitField0_ = (bitField0_ & ~0x00001000); @@ -13064,8 +11664,6 @@ public Builder clearJwt() { } /** * optional string jwt = 13; - * @param value The bytes for jwt to set. - * @return This builder for chaining. */ public Builder setJwtBytes( com.google.protobuf.ByteString value) { @@ -13078,27 +11676,22 @@ public Builder setJwtBytes( return this; } + // optional bool is_external_client_request = 14 [default = false]; private boolean isExternalClientRequest_ ; /** * optional bool is_external_client_request = 14 [default = false]; - * @return Whether the isExternalClientRequest field is set. */ - @java.lang.Override public boolean hasIsExternalClientRequest() { - return ((bitField0_ & 0x00002000) != 0); + return ((bitField0_ & 0x00002000) == 0x00002000); } /** * optional bool is_external_client_request = 14 [default = false]; - * @return The isExternalClientRequest. */ - @java.lang.Override public boolean getIsExternalClientRequest() { return isExternalClientRequest_; } /** * optional bool is_external_client_request = 14 [default = false]; - * @param value The isExternalClientRequest to set. - * @return This builder for chaining. */ public Builder setIsExternalClientRequest(boolean value) { bitField0_ |= 0x00002000; @@ -13108,7 +11701,6 @@ public Builder setIsExternalClientRequest(boolean value) { } /** * optional bool is_external_client_request = 14 [default = false]; - * @return This builder for chaining. 
*/ public Builder clearIsExternalClientRequest() { bitField0_ = (bitField0_ & ~0x00002000); @@ -13116,88 +11708,43 @@ public Builder clearIsExternalClientRequest() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:SubmitWorkRequestProto) } - // @@protoc_insertion_point(class_scope:SubmitWorkRequestProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public SubmitWorkRequestProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SubmitWorkRequestProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkRequestProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new SubmitWorkRequestProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:SubmitWorkRequestProto) } - public interface RegisterDagRequestProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:RegisterDagRequestProto) - com.google.protobuf.MessageOrBuilder { + public interface RegisterDagRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional string user = 1; /** * optional string user = 1; - * @return Whether the user field is set. */ boolean hasUser(); /** * optional string user = 1; - * @return The user. */ java.lang.String getUser(); /** * optional string user = 1; - * @return The bytes for user. */ com.google.protobuf.ByteString getUserBytes(); + // required .QueryIdentifierProto query_identifier = 2; /** * required .QueryIdentifierProto query_identifier = 2; - * @return Whether the queryIdentifier field is set. */ boolean hasQueryIdentifier(); /** * required .QueryIdentifierProto query_identifier = 2; - * @return The queryIdentifier. */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier(); /** @@ -13205,14 +11752,13 @@ public interface RegisterDagRequestProtoOrBuilder extends */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder(); + // optional bytes credentials_binary = 3; /** * optional bytes credentials_binary = 3; - * @return Whether the credentialsBinary field is set. */ boolean hasCredentialsBinary(); /** * optional bytes credentials_binary = 3; - * @return The credentialsBinary. 
*/ com.google.protobuf.ByteString getCredentialsBinary(); } @@ -13220,39 +11766,35 @@ public interface RegisterDagRequestProtoOrBuilder extends * Protobuf type {@code RegisterDagRequestProto} */ public static final class RegisterDagRequestProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:RegisterDagRequestProto) - RegisterDagRequestProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements RegisterDagRequestProtoOrBuilder { // Use RegisterDagRequestProto.newBuilder() to construct. - private RegisterDagRequestProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private RegisterDagRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private RegisterDagRequestProto() { - user_ = ""; - credentialsBinary_ = com.google.protobuf.ByteString.EMPTY; + private RegisterDagRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final RegisterDagRequestProto defaultInstance; + public static RegisterDagRequestProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new RegisterDagRequestProto(); + public RegisterDagRequestProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private RegisterDagRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -13264,15 +11806,21 @@ private RegisterDagRequestProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - user_ = bs; + user_ = input.readBytes(); break; } case 18: { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder subBuilder = null; - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { subBuilder = queryIdentifier_.toBuilder(); } queryIdentifier_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.PARSER, extensionRegistry); @@ -13288,22 +11836,13 @@ private RegisterDagRequestProto( credentialsBinary_ = input.readBytes(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); 
+ e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -13314,30 +11853,41 @@ private RegisterDagRequestProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RegisterDagRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RegisterDagRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional string user = 1; public static final int USER_FIELD_NUMBER = 1; - private volatile java.lang.Object user_; + private java.lang.Object user_; /** * optional string user = 1; - * @return Whether the user field is set. */ - @java.lang.Override public boolean hasUser() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string user = 1; - * @return The user. */ - @java.lang.Override public java.lang.String getUser() { java.lang.Object ref = user_; if (ref instanceof java.lang.String) { @@ -13354,9 +11904,7 @@ public java.lang.String getUser() { } /** * optional string user = 1; - * @return The bytes for user. */ - @java.lang.Override public com.google.protobuf.ByteString getUserBytes() { java.lang.Object ref = user_; @@ -13371,57 +11919,53 @@ public java.lang.String getUser() { } } + // required .QueryIdentifierProto query_identifier = 2; public static final int QUERY_IDENTIFIER_FIELD_NUMBER = 2; private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_; /** * required .QueryIdentifierProto query_identifier = 2; - * @return Whether the queryIdentifier field is set. */ - @java.lang.Override public boolean hasQueryIdentifier() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .QueryIdentifierProto query_identifier = 2; - * @return The queryIdentifier. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() { - return queryIdentifier_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } /** * required .QueryIdentifierProto query_identifier = 2; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() { - return queryIdentifier_ == null ? 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } + // optional bytes credentials_binary = 3; public static final int CREDENTIALS_BINARY_FIELD_NUMBER = 3; private com.google.protobuf.ByteString credentialsBinary_; /** * optional bytes credentials_binary = 3; - * @return Whether the credentialsBinary field is set. */ - @java.lang.Override public boolean hasCredentialsBinary() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional bytes credentials_binary = 3; - * @return The credentialsBinary. */ - @java.lang.Override public com.google.protobuf.ByteString getCredentialsBinary() { return credentialsBinary_; } + private void initFields() { + user_ = ""; + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); + credentialsBinary_ = com.google.protobuf.ByteString.EMPTY; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; if (!hasQueryIdentifier()) { memoizedIsInitialized = 0; @@ -13431,43 +11975,51 @@ public final boolean isInitialized() { return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, user_); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getUserBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { - output.writeMessage(2, getQueryIdentifier()); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeMessage(2, queryIdentifier_); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBytes(3, credentialsBinary_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, user_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getUserBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, getQueryIdentifier()); + .computeMessageSize(2, queryIdentifier_); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(3, credentialsBinary_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -13478,32 +12030,35 @@ public boolean 
equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto) obj; - if (hasUser() != other.hasUser()) return false; + boolean result = true; + result = result && (hasUser() == other.hasUser()); if (hasUser()) { - if (!getUser() - .equals(other.getUser())) return false; + result = result && getUser() + .equals(other.getUser()); } - if (hasQueryIdentifier() != other.hasQueryIdentifier()) return false; + result = result && (hasQueryIdentifier() == other.hasQueryIdentifier()); if (hasQueryIdentifier()) { - if (!getQueryIdentifier() - .equals(other.getQueryIdentifier())) return false; + result = result && getQueryIdentifier() + .equals(other.getQueryIdentifier()); } - if (hasCredentialsBinary() != other.hasCredentialsBinary()) return false; + result = result && (hasCredentialsBinary() == other.hasCredentialsBinary()); if (hasCredentialsBinary()) { - if (!getCredentialsBinary() - .equals(other.getCredentialsBinary())) return false; + result = result && getCredentialsBinary() + .equals(other.getCredentialsBinary()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasUser()) { hash = (37 * hash) + USER_FIELD_NUMBER; hash = (53 * hash) + getUser().hashCode(); @@ -13516,22 +12071,11 @@ public int hashCode() { hash = (37 * hash) + CREDENTIALS_BINARY_FIELD_NUMBER; hash = (53 * hash) + getCredentialsBinary().hashCode(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -13555,59 +12099,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Re } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return 
PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -13615,16 +12146,14 @@ protected Builder newBuilderForType( * Protobuf type {@code RegisterDagRequestProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:RegisterDagRequestProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -13637,23 +12166,25 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getQueryIdentifierFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); user_ = ""; bitField0_ = (bitField0_ & ~0x00000001); if (queryIdentifierBuilder_ == null) { - queryIdentifier_ = null; + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); } else { queryIdentifierBuilder_.clear(); } @@ -13663,18 +12194,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagRequestProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto result = buildPartial(); if (!result.isInitialized()) { @@ -13683,24 +12215,23 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterD return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto 
buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.user_ = user_; - if (((from_bitField0_ & 0x00000002) != 0)) { - if (queryIdentifierBuilder_ == null) { - result.queryIdentifier_ = queryIdentifier_; - } else { - result.queryIdentifier_ = queryIdentifierBuilder_.build(); - } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - if (((from_bitField0_ & 0x00000004) != 0)) { + if (queryIdentifierBuilder_ == null) { + result.queryIdentifier_ = queryIdentifier_; + } else { + result.queryIdentifier_ = queryIdentifierBuilder_.build(); + } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.credentialsBinary_ = credentialsBinary_; @@ -13709,39 +12240,6 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterD return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto)other); @@ -13764,20 +12262,18 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasCredentialsBinary()) { setCredentialsBinary(other.getCredentialsBinary()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { if (!hasQueryIdentifier()) { + return false; } return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -13787,7 +12283,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -13797,27 +12293,23 @@ public Builder mergeFrom( } private int 
bitField0_; + // optional string user = 1; private java.lang.Object user_ = ""; /** * optional string user = 1; - * @return Whether the user field is set. */ public boolean hasUser() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string user = 1; - * @return The user. */ public java.lang.String getUser() { java.lang.Object ref = user_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - user_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + user_ = s; return s; } else { return (java.lang.String) ref; @@ -13825,7 +12317,6 @@ public java.lang.String getUser() { } /** * optional string user = 1; - * @return The bytes for user. */ public com.google.protobuf.ByteString getUserBytes() { @@ -13842,8 +12333,6 @@ public java.lang.String getUser() { } /** * optional string user = 1; - * @param value The user to set. - * @return This builder for chaining. */ public Builder setUser( java.lang.String value) { @@ -13857,7 +12346,6 @@ public Builder setUser( } /** * optional string user = 1; - * @return This builder for chaining. */ public Builder clearUser() { bitField0_ = (bitField0_ & ~0x00000001); @@ -13867,8 +12355,6 @@ public Builder clearUser() { } /** * optional string user = 1; - * @param value The bytes for user to set. - * @return This builder for chaining. */ public Builder setUserBytes( com.google.protobuf.ByteString value) { @@ -13881,23 +12367,22 @@ public Builder setUserBytes( return this; } - private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_; - private com.google.protobuf.SingleFieldBuilderV3< + // required .QueryIdentifierProto query_identifier = 2; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> queryIdentifierBuilder_; /** * required .QueryIdentifierProto query_identifier = 2; - * @return Whether the queryIdentifier field is set. */ public boolean hasQueryIdentifier() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * required .QueryIdentifierProto query_identifier = 2; - * @return The queryIdentifier. */ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() { if (queryIdentifierBuilder_ == null) { - return queryIdentifier_ == null ? 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } else { return queryIdentifierBuilder_.getMessage(); } @@ -13937,8 +12422,7 @@ public Builder setQueryIdentifier( */ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) { if (queryIdentifierBuilder_ == null) { - if (((bitField0_ & 0x00000002) != 0) && - queryIdentifier_ != null && + if (((bitField0_ & 0x00000002) == 0x00000002) && queryIdentifier_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance()) { queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder(queryIdentifier_).mergeFrom(value).buildPartial(); @@ -13957,7 +12441,7 @@ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapD */ public Builder clearQueryIdentifier() { if (queryIdentifierBuilder_ == null) { - queryIdentifier_ = null; + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); onChanged(); } else { queryIdentifierBuilder_.clear(); @@ -13980,20 +12464,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIden if (queryIdentifierBuilder_ != null) { return queryIdentifierBuilder_.getMessageOrBuilder(); } else { - return queryIdentifier_ == null ? - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } } /** * required .QueryIdentifierProto query_identifier = 2; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> getQueryIdentifierFieldBuilder() { if (queryIdentifierBuilder_ == null) { - queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>( - getQueryIdentifier(), + queryIdentifier_, getParentForChildren(), isClean()); queryIdentifier_ = null; @@ -14001,27 +12484,22 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIden return queryIdentifierBuilder_; } + // optional bytes credentials_binary = 3; private com.google.protobuf.ByteString credentialsBinary_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes credentials_binary = 3; - * @return Whether the credentialsBinary field is set. */ - @java.lang.Override public boolean hasCredentialsBinary() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional bytes credentials_binary = 3; - * @return The credentialsBinary. */ - @java.lang.Override public com.google.protobuf.ByteString getCredentialsBinary() { return credentialsBinary_; } /** * optional bytes credentials_binary = 3; - * @param value The credentialsBinary to set. 
- * @return This builder for chaining. */ public Builder setCredentialsBinary(com.google.protobuf.ByteString value) { if (value == null) { @@ -14034,7 +12512,6 @@ public Builder setCredentialsBinary(com.google.protobuf.ByteString value) { } /** * optional bytes credentials_binary = 3; - * @return This builder for chaining. */ public Builder clearCredentialsBinary() { bitField0_ = (bitField0_ & ~0x00000004); @@ -14042,98 +12519,54 @@ public Builder clearCredentialsBinary() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:RegisterDagRequestProto) } - // @@protoc_insertion_point(class_scope:RegisterDagRequestProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto(); + defaultInstance = new RegisterDagRequestProto(true); + defaultInstance.initFields(); } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto getDefaultInstance() { - return DEFAULT_INSTANCE; + // @@protoc_insertion_point(class_scope:RegisterDagRequestProto) + } + + public interface RegisterDagResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + } + /** + * Protobuf type {@code RegisterDagResponseProto} + */ + public static final class RegisterDagResponseProto extends + com.google.protobuf.GeneratedMessage + implements RegisterDagResponseProtoOrBuilder { + // Use RegisterDagResponseProto.newBuilder() to construct. 
+ private RegisterDagResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + this.unknownFields = builder.getUnknownFields(); } + private RegisterDagResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public RegisterDagRequestProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new RegisterDagRequestProto(input, extensionRegistry); - } - }; + private static final RegisterDagResponseProto defaultInstance; + public static RegisterDagResponseProto getDefaultInstance() { + return defaultInstance; + } - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagRequestProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - - } - - public interface RegisterDagResponseProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:RegisterDagResponseProto) - com.google.protobuf.MessageOrBuilder { - } - /** - * Protobuf type {@code RegisterDagResponseProto} - */ - public static final class RegisterDagResponseProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:RegisterDagResponseProto) - RegisterDagResponseProtoOrBuilder { - private static final long serialVersionUID = 0L; - // Use RegisterDagResponseProto.newBuilder() to construct. 
- private RegisterDagResponseProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - private RegisterDagResponseProto() { - } - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new RegisterDagResponseProto(); + public RegisterDagResponseProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private RegisterDagResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -14145,8 +12578,8 @@ private RegisterDagResponseProto( done = true; break; default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { done = true; } break; @@ -14155,11 +12588,9 @@ private RegisterDagResponseProto( } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -14170,42 +12601,63 @@ private RegisterDagResponseProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.Builder.class); } - private byte memoizedIsInitialized = -1; + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public RegisterDagResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new RegisterDagResponseProto(input, extensionRegistry); + } + }; + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void 
writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - unknownFields.writeTo(output); + getSerializedSize(); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -14216,33 +12668,25 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto) obj; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (29 * hash) + unknownFields.hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -14266,59 +12710,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Re } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, 
input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -14326,16 +12757,14 @@ protected Builder newBuilderForType( * Protobuf type {@code RegisterDagResponseProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:RegisterDagResponseProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -14348,33 +12777,36 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if 
(com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_RegisterDagResponseProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto result = buildPartial(); if (!result.isInitialized()) { @@ -14383,46 +12815,12 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterD return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto(this); onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto)other); @@ -14434,17 +12832,14 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto other) { if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -14454,7 +12849,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -14462,87 +12857,42 @@ public Builder mergeFrom( } return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:RegisterDagResponseProto) } - // @@protoc_insertion_point(class_scope:RegisterDagResponseProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public RegisterDagResponseProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new RegisterDagResponseProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.RegisterDagResponseProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new RegisterDagResponseProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:RegisterDagResponseProto) } - public interface SubmitWorkResponseProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:SubmitWorkResponseProto) - com.google.protobuf.MessageOrBuilder { + public interface SubmitWorkResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional .SubmissionStateProto submission_state = 1; /** * optional .SubmissionStateProto submission_state = 1; - * @return Whether the submissionState field is set. */ boolean hasSubmissionState(); /** * optional .SubmissionStateProto submission_state = 1; - * @return The submissionState. */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto getSubmissionState(); + // optional string unique_node_id = 2; /** * optional string unique_node_id = 2; - * @return Whether the uniqueNodeId field is set. */ boolean hasUniqueNodeId(); /** * optional string unique_node_id = 2; - * @return The uniqueNodeId. */ java.lang.String getUniqueNodeId(); /** * optional string unique_node_id = 2; - * @return The bytes for uniqueNodeId. 
*/ com.google.protobuf.ByteString getUniqueNodeIdBytes(); @@ -14551,39 +12901,35 @@ public interface SubmitWorkResponseProtoOrBuilder extends * Protobuf type {@code SubmitWorkResponseProto} */ public static final class SubmitWorkResponseProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:SubmitWorkResponseProto) - SubmitWorkResponseProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements SubmitWorkResponseProtoOrBuilder { // Use SubmitWorkResponseProto.newBuilder() to construct. - private SubmitWorkResponseProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private SubmitWorkResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private SubmitWorkResponseProto() { - submissionState_ = 1; - uniqueNodeId_ = ""; + private SubmitWorkResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SubmitWorkResponseProto defaultInstance; + public static SubmitWorkResponseProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new SubmitWorkResponseProto(); + public SubmitWorkResponseProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private SubmitWorkResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -14595,40 +12941,36 @@ private SubmitWorkResponseProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 8: { int rawValue = input.readEnum(); - @SuppressWarnings("deprecation") org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto value = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(1, rawValue); } else { bitField0_ |= 0x00000001; - submissionState_ = rawValue; + submissionState_ = value; } break; } case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; - uniqueNodeId_ = bs; - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } + uniqueNodeId_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = 
unknownFields.build(); makeExtensionsImmutable(); @@ -14639,49 +12981,57 @@ private SubmitWorkResponseProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SubmitWorkResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SubmitWorkResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional .SubmissionStateProto submission_state = 1; public static final int SUBMISSION_STATE_FIELD_NUMBER = 1; - private int submissionState_; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto submissionState_; /** * optional .SubmissionStateProto submission_state = 1; - * @return Whether the submissionState field is set. */ - @java.lang.Override public boolean hasSubmissionState() { - return ((bitField0_ & 0x00000001) != 0); + public boolean hasSubmissionState() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .SubmissionStateProto submission_state = 1; - * @return The submissionState. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto getSubmissionState() { - @SuppressWarnings("deprecation") - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto result = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto.valueOf(submissionState_); - return result == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto.ACCEPTED : result; + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto getSubmissionState() { + return submissionState_; } + // optional string unique_node_id = 2; public static final int UNIQUE_NODE_ID_FIELD_NUMBER = 2; - private volatile java.lang.Object uniqueNodeId_; + private java.lang.Object uniqueNodeId_; /** * optional string unique_node_id = 2; - * @return Whether the uniqueNodeId field is set. */ - @java.lang.Override public boolean hasUniqueNodeId() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string unique_node_id = 2; - * @return The uniqueNodeId. */ - @java.lang.Override public java.lang.String getUniqueNodeId() { java.lang.Object ref = uniqueNodeId_; if (ref instanceof java.lang.String) { @@ -14698,9 +13048,7 @@ public java.lang.String getUniqueNodeId() { } /** * optional string unique_node_id = 2; - * @return The bytes for uniqueNodeId. 
*/ - @java.lang.Override public com.google.protobuf.ByteString getUniqueNodeIdBytes() { java.lang.Object ref = uniqueNodeId_; @@ -14715,47 +13063,57 @@ public java.lang.String getUniqueNodeId() { } } + private void initFields() { + submissionState_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto.ACCEPTED; + uniqueNodeId_ = ""; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - output.writeEnum(1, submissionState_); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeEnum(1, submissionState_.getNumber()); } - if (((bitField0_ & 0x00000002) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, uniqueNodeId_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getUniqueNodeIdBytes()); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeEnumSize(1, submissionState_); + .computeEnumSize(1, submissionState_.getNumber()); } - if (((bitField0_ & 0x00000002) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, uniqueNodeId_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getUniqueNodeIdBytes()); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -14766,50 +13124,43 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto) obj; - if (hasSubmissionState() != other.hasSubmissionState()) return false; + boolean result = true; + result = result && (hasSubmissionState() == other.hasSubmissionState()); if (hasSubmissionState()) { - if (submissionState_ != other.submissionState_) return false; + result = result && + (getSubmissionState() == other.getSubmissionState()); } - if (hasUniqueNodeId() != other.hasUniqueNodeId()) return false; + result = result && (hasUniqueNodeId() == other.hasUniqueNodeId()); if (hasUniqueNodeId()) { - if (!getUniqueNodeId() - .equals(other.getUniqueNodeId())) return false; + result = result && getUniqueNodeId() + .equals(other.getUniqueNodeId()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + 
getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasSubmissionState()) { hash = (37 * hash) + SUBMISSION_STATE_FIELD_NUMBER; - hash = (53 * hash) + submissionState_; + hash = (53 * hash) + hashEnum(getSubmissionState()); } if (hasUniqueNodeId()) { hash = (37 * hash) + UNIQUE_NODE_ID_FIELD_NUMBER; hash = (53 * hash) + getUniqueNodeId().hashCode(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -14833,59 +13184,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Su } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto parseFrom( com.google.protobuf.CodedInputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -14893,16 +13231,14 @@ protected Builder newBuilderForType( * Protobuf type {@code SubmitWorkResponseProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:SubmitWorkResponseProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -14915,37 +13251,40 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); - submissionState_ = 1; + submissionState_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto.ACCEPTED; bitField0_ = (bitField0_ & ~0x00000001); uniqueNodeId_ = ""; bitField0_ = (bitField0_ & ~0x00000002); return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SubmitWorkResponseProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto getDefaultInstanceForType() { 
return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto result = buildPartial(); if (!result.isInitialized()) { @@ -14954,16 +13293,15 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWor return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.submissionState_ = submissionState_; - if (((from_bitField0_ & 0x00000002) != 0)) { + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.uniqueNodeId_ = uniqueNodeId_; @@ -14972,39 +13310,6 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWor return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto)other); @@ -15024,17 +13329,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc uniqueNodeId_ = other.uniqueNodeId_; onChanged(); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -15044,7 +13346,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { 
mergeFrom(parsedMessage); @@ -15054,70 +13356,59 @@ public Builder mergeFrom( } private int bitField0_; - private int submissionState_ = 1; + // optional .SubmissionStateProto submission_state = 1; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto submissionState_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto.ACCEPTED; /** * optional .SubmissionStateProto submission_state = 1; - * @return Whether the submissionState field is set. */ - @java.lang.Override public boolean hasSubmissionState() { - return ((bitField0_ & 0x00000001) != 0); + public boolean hasSubmissionState() { + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .SubmissionStateProto submission_state = 1; - * @return The submissionState. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto getSubmissionState() { - @SuppressWarnings("deprecation") - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto result = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto.valueOf(submissionState_); - return result == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto.ACCEPTED : result; + return submissionState_; } /** * optional .SubmissionStateProto submission_state = 1; - * @param value The submissionState to set. - * @return This builder for chaining. */ public Builder setSubmissionState(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000001; - submissionState_ = value.getNumber(); + submissionState_ = value; onChanged(); return this; } /** * optional .SubmissionStateProto submission_state = 1; - * @return This builder for chaining. */ public Builder clearSubmissionState() { bitField0_ = (bitField0_ & ~0x00000001); - submissionState_ = 1; + submissionState_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmissionStateProto.ACCEPTED; onChanged(); return this; } + // optional string unique_node_id = 2; private java.lang.Object uniqueNodeId_ = ""; /** * optional string unique_node_id = 2; - * @return Whether the uniqueNodeId field is set. */ public boolean hasUniqueNodeId() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string unique_node_id = 2; - * @return The uniqueNodeId. */ public java.lang.String getUniqueNodeId() { java.lang.Object ref = uniqueNodeId_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - uniqueNodeId_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + uniqueNodeId_ = s; return s; } else { return (java.lang.String) ref; @@ -15125,7 +13416,6 @@ public java.lang.String getUniqueNodeId() { } /** * optional string unique_node_id = 2; - * @return The bytes for uniqueNodeId. */ public com.google.protobuf.ByteString getUniqueNodeIdBytes() { @@ -15142,8 +13432,6 @@ public java.lang.String getUniqueNodeId() { } /** * optional string unique_node_id = 2; - * @param value The uniqueNodeId to set. - * @return This builder for chaining. 
*/ public Builder setUniqueNodeId( java.lang.String value) { @@ -15157,7 +13445,6 @@ public Builder setUniqueNodeId( } /** * optional string unique_node_id = 2; - * @return This builder for chaining. */ public Builder clearUniqueNodeId() { bitField0_ = (bitField0_ & ~0x00000002); @@ -15167,8 +13454,6 @@ public Builder clearUniqueNodeId() { } /** * optional string unique_node_id = 2; - * @param value The bytes for uniqueNodeId to set. - * @return This builder for chaining. */ public Builder setUniqueNodeIdBytes( com.google.protobuf.ByteString value) { @@ -15180,71 +13465,28 @@ public Builder setUniqueNodeIdBytes( onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:SubmitWorkResponseProto) } - // @@protoc_insertion_point(class_scope:SubmitWorkResponseProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public SubmitWorkResponseProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SubmitWorkResponseProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SubmitWorkResponseProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new SubmitWorkResponseProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:SubmitWorkResponseProto) } - public interface SourceStateUpdatedRequestProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:SourceStateUpdatedRequestProto) - com.google.protobuf.MessageOrBuilder { + public interface SourceStateUpdatedRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional .QueryIdentifierProto query_identifier = 1; /** * optional .QueryIdentifierProto query_identifier = 1; - * @return Whether the queryIdentifier field is set. */ boolean hasQueryIdentifier(); /** * optional .QueryIdentifierProto query_identifier = 1; - * @return The queryIdentifier. 
*/ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier(); /** @@ -15252,31 +13494,28 @@ public interface SourceStateUpdatedRequestProtoOrBuilder extends */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder(); + // optional string src_name = 2; /** * optional string src_name = 2; - * @return Whether the srcName field is set. */ boolean hasSrcName(); /** * optional string src_name = 2; - * @return The srcName. */ java.lang.String getSrcName(); /** * optional string src_name = 2; - * @return The bytes for srcName. */ com.google.protobuf.ByteString getSrcNameBytes(); + // optional .SourceStateProto state = 3; /** * optional .SourceStateProto state = 3; - * @return Whether the state field is set. */ boolean hasState(); /** * optional .SourceStateProto state = 3; - * @return The state. */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto getState(); } @@ -15284,39 +13523,35 @@ public interface SourceStateUpdatedRequestProtoOrBuilder extends * Protobuf type {@code SourceStateUpdatedRequestProto} */ public static final class SourceStateUpdatedRequestProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:SourceStateUpdatedRequestProto) - SourceStateUpdatedRequestProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements SourceStateUpdatedRequestProtoOrBuilder { // Use SourceStateUpdatedRequestProto.newBuilder() to construct. - private SourceStateUpdatedRequestProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private SourceStateUpdatedRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private SourceStateUpdatedRequestProto() { - srcName_ = ""; - state_ = 1; + private SourceStateUpdatedRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SourceStateUpdatedRequestProto defaultInstance; + public static SourceStateUpdatedRequestProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new SourceStateUpdatedRequestProto(); + public SourceStateUpdatedRequestProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private SourceStateUpdatedRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -15328,9 +13563,16 @@ private SourceStateUpdatedRequestProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder subBuilder = null; - if 
(((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = queryIdentifier_.toBuilder(); } queryIdentifier_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.PARSER, extensionRegistry); @@ -15342,27 +13584,18 @@ private SourceStateUpdatedRequestProto( break; } case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; - srcName_ = bs; + srcName_ = input.readBytes(); break; } case 24: { int rawValue = input.readEnum(); - @SuppressWarnings("deprecation") org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto value = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(3, rawValue); } else { bitField0_ |= 0x00000004; - state_ = rawValue; - } - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; + state_ = value; } break; } @@ -15370,11 +13603,9 @@ private SourceStateUpdatedRequestProto( } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -15385,56 +13616,63 @@ private SourceStateUpdatedRequestProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SourceStateUpdatedRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SourceStateUpdatedRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SourceStateUpdatedRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SourceStateUpdatedRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional .QueryIdentifierProto query_identifier = 1; public static final int QUERY_IDENTIFIER_FIELD_NUMBER = 1; private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_; /** * optional .QueryIdentifierProto query_identifier = 1; - * @return Whether the queryIdentifier field is set. */ - @java.lang.Override public boolean hasQueryIdentifier() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .QueryIdentifierProto query_identifier = 1; - * @return The queryIdentifier. 
*/ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() { - return queryIdentifier_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } /** * optional .QueryIdentifierProto query_identifier = 1; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() { - return queryIdentifier_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } + // optional string src_name = 2; public static final int SRC_NAME_FIELD_NUMBER = 2; - private volatile java.lang.Object srcName_; + private java.lang.Object srcName_; /** * optional string src_name = 2; - * @return Whether the srcName field is set. */ - @java.lang.Override public boolean hasSrcName() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string src_name = 2; - * @return The srcName. */ - @java.lang.Override public java.lang.String getSrcName() { java.lang.Object ref = srcName_; if (ref instanceof java.lang.String) { @@ -15451,9 +13689,7 @@ public java.lang.String getSrcName() { } /** * optional string src_name = 2; - * @return The bytes for srcName. */ - @java.lang.Override public com.google.protobuf.ByteString getSrcNameBytes() { java.lang.Object ref = srcName_; @@ -15468,73 +13704,81 @@ public java.lang.String getSrcName() { } } + // optional .SourceStateProto state = 3; public static final int STATE_FIELD_NUMBER = 3; - private int state_; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto state_; /** * optional .SourceStateProto state = 3; - * @return Whether the state field is set. */ - @java.lang.Override public boolean hasState() { - return ((bitField0_ & 0x00000004) != 0); + public boolean hasState() { + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .SourceStateProto state = 3; - * @return The state. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto getState() { - @SuppressWarnings("deprecation") - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto result = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto.valueOf(state_); - return result == null ? 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto.S_SUCCEEDED : result; + public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto getState() { + return state_; } + private void initFields() { + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); + srcName_ = ""; + state_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto.S_SUCCEEDED; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - output.writeMessage(1, getQueryIdentifier()); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, queryIdentifier_); } - if (((bitField0_ & 0x00000002) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, srcName_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getSrcNameBytes()); } - if (((bitField0_ & 0x00000004) != 0)) { - output.writeEnum(3, state_); + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeEnum(3, state_.getNumber()); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, getQueryIdentifier()); + .computeMessageSize(1, queryIdentifier_); } - if (((bitField0_ & 0x00000002) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, srcName_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getSrcNameBytes()); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream - .computeEnumSize(3, state_); + .computeEnumSize(3, state_.getNumber()); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -15545,31 +13789,35 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto) obj; - if (hasQueryIdentifier() != other.hasQueryIdentifier()) return false; + boolean result = true; + result = result && (hasQueryIdentifier() == other.hasQueryIdentifier()); if (hasQueryIdentifier()) { - if (!getQueryIdentifier() - .equals(other.getQueryIdentifier())) return false; + result 
= result && getQueryIdentifier() + .equals(other.getQueryIdentifier()); } - if (hasSrcName() != other.hasSrcName()) return false; + result = result && (hasSrcName() == other.hasSrcName()); if (hasSrcName()) { - if (!getSrcName() - .equals(other.getSrcName())) return false; + result = result && getSrcName() + .equals(other.getSrcName()); } - if (hasState() != other.hasState()) return false; + result = result && (hasState() == other.hasState()); if (hasState()) { - if (state_ != other.state_) return false; + result = result && + (getState() == other.getState()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasQueryIdentifier()) { hash = (37 * hash) + QUERY_IDENTIFIER_FIELD_NUMBER; hash = (53 * hash) + getQueryIdentifier().hashCode(); @@ -15580,24 +13828,13 @@ public int hashCode() { } if (hasState()) { hash = (37 * hash) + STATE_FIELD_NUMBER; - hash = (53 * hash) + state_; + hash = (53 * hash) + hashEnum(getState()); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -15621,59 +13858,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.So } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto parseDelimitedFrom( java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -15681,16 +13905,14 @@ protected Builder newBuilderForType( * Protobuf type {@code SourceStateUpdatedRequestProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:SourceStateUpdatedRequestProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SourceStateUpdatedRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SourceStateUpdatedRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -15703,44 +13925,47 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getQueryIdentifierFieldBuilder(); } } - 
@java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); if (queryIdentifierBuilder_ == null) { - queryIdentifier_ = null; + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); } else { queryIdentifierBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000001); srcName_ = ""; bitField0_ = (bitField0_ & ~0x00000002); - state_ = 1; + state_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto.S_SUCCEEDED; bitField0_ = (bitField0_ & ~0x00000004); return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SourceStateUpdatedRequestProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto result = buildPartial(); if (!result.isInitialized()) { @@ -15749,24 +13974,23 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceSta return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { - if (queryIdentifierBuilder_ == null) { - result.queryIdentifier_ = queryIdentifier_; - } else { - result.queryIdentifier_ = queryIdentifierBuilder_.build(); - } + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (((from_bitField0_ & 0x00000002) != 0)) { + if (queryIdentifierBuilder_ == null) { + result.queryIdentifier_ = queryIdentifier_; + } else { + result.queryIdentifier_ = queryIdentifierBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.srcName_ = srcName_; - if (((from_bitField0_ & 0x00000004) != 0)) { + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } result.state_ = state_; @@ -15775,39 +13999,6 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceSta return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - 
com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto)other); @@ -15830,17 +14021,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasState()) { setState(other.getState()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -15850,7 +14038,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -15860,23 +14048,22 @@ public Builder mergeFrom( } private int bitField0_; - private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_; - private com.google.protobuf.SingleFieldBuilderV3< + // optional .QueryIdentifierProto query_identifier = 1; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> queryIdentifierBuilder_; /** * optional .QueryIdentifierProto query_identifier = 1; - * @return Whether the queryIdentifier field is set. */ public boolean hasQueryIdentifier() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .QueryIdentifierProto query_identifier = 1; - * @return The queryIdentifier. */ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() { if (queryIdentifierBuilder_ == null) { - return queryIdentifier_ == null ? 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } else { return queryIdentifierBuilder_.getMessage(); } @@ -15916,8 +14103,7 @@ public Builder setQueryIdentifier( */ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) { if (queryIdentifierBuilder_ == null) { - if (((bitField0_ & 0x00000001) != 0) && - queryIdentifier_ != null && + if (((bitField0_ & 0x00000001) == 0x00000001) && queryIdentifier_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance()) { queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder(queryIdentifier_).mergeFrom(value).buildPartial(); @@ -15936,7 +14122,7 @@ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapD */ public Builder clearQueryIdentifier() { if (queryIdentifierBuilder_ == null) { - queryIdentifier_ = null; + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); onChanged(); } else { queryIdentifierBuilder_.clear(); @@ -15959,20 +14145,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIden if (queryIdentifierBuilder_ != null) { return queryIdentifierBuilder_.getMessageOrBuilder(); } else { - return queryIdentifier_ == null ? - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } } /** * optional .QueryIdentifierProto query_identifier = 1; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> getQueryIdentifierFieldBuilder() { if (queryIdentifierBuilder_ == null) { - queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>( - getQueryIdentifier(), + queryIdentifier_, getParentForChildren(), isClean()); queryIdentifier_ = null; @@ -15980,27 +14165,23 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIden return queryIdentifierBuilder_; } + // optional string src_name = 2; private java.lang.Object srcName_ = ""; /** * optional string src_name = 2; - * @return Whether the srcName field is set. */ public boolean hasSrcName() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string src_name = 2; - * @return The srcName. 
*/ public java.lang.String getSrcName() { java.lang.Object ref = srcName_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - srcName_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + srcName_ = s; return s; } else { return (java.lang.String) ref; @@ -16008,7 +14189,6 @@ public java.lang.String getSrcName() { } /** * optional string src_name = 2; - * @return The bytes for srcName. */ public com.google.protobuf.ByteString getSrcNameBytes() { @@ -16025,8 +14205,6 @@ public java.lang.String getSrcName() { } /** * optional string src_name = 2; - * @param value The srcName to set. - * @return This builder for chaining. */ public Builder setSrcName( java.lang.String value) { @@ -16040,7 +14218,6 @@ public Builder setSrcName( } /** * optional string src_name = 2; - * @return This builder for chaining. */ public Builder clearSrcName() { bitField0_ = (bitField0_ & ~0x00000002); @@ -16050,8 +14227,6 @@ public Builder clearSrcName() { } /** * optional string src_name = 2; - * @param value The bytes for srcName to set. - * @return This builder for chaining. */ public Builder setSrcNameBytes( com.google.protobuf.ByteString value) { @@ -16064,140 +14239,89 @@ public Builder setSrcNameBytes( return this; } - private int state_ = 1; + // optional .SourceStateProto state = 3; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto state_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto.S_SUCCEEDED; /** * optional .SourceStateProto state = 3; - * @return Whether the state field is set. */ - @java.lang.Override public boolean hasState() { - return ((bitField0_ & 0x00000004) != 0); + public boolean hasState() { + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .SourceStateProto state = 3; - * @return The state. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto getState() { - @SuppressWarnings("deprecation") - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto result = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto.valueOf(state_); - return result == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto.S_SUCCEEDED : result; + return state_; } /** * optional .SourceStateProto state = 3; - * @param value The state to set. - * @return This builder for chaining. */ public Builder setState(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00000004; - state_ = value.getNumber(); + state_ = value; onChanged(); return this; } /** * optional .SourceStateProto state = 3; - * @return This builder for chaining. 
*/ public Builder clearState() { bitField0_ = (bitField0_ & ~0x00000004); - state_ = 1; + state_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateProto.S_SUCCEEDED; onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:SourceStateUpdatedRequestProto) } - // @@protoc_insertion_point(class_scope:SourceStateUpdatedRequestProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public SourceStateUpdatedRequestProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SourceStateUpdatedRequestProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedRequestProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new SourceStateUpdatedRequestProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:SourceStateUpdatedRequestProto) } - public interface SourceStateUpdatedResponseProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:SourceStateUpdatedResponseProto) - com.google.protobuf.MessageOrBuilder { + public interface SourceStateUpdatedResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code SourceStateUpdatedResponseProto} */ public static final class SourceStateUpdatedResponseProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:SourceStateUpdatedResponseProto) - SourceStateUpdatedResponseProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements SourceStateUpdatedResponseProtoOrBuilder { // Use SourceStateUpdatedResponseProto.newBuilder() to construct. 
- private SourceStateUpdatedResponseProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private SourceStateUpdatedResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private SourceStateUpdatedResponseProto() { + private SourceStateUpdatedResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SourceStateUpdatedResponseProto defaultInstance; + public static SourceStateUpdatedResponseProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new SourceStateUpdatedResponseProto(); + public SourceStateUpdatedResponseProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private SourceStateUpdatedResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -16209,8 +14333,8 @@ private SourceStateUpdatedResponseProto( done = true; break; default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { done = true; } break; @@ -16219,11 +14343,9 @@ private SourceStateUpdatedResponseProto( } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -16234,42 +14356,63 @@ private SourceStateUpdatedResponseProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SourceStateUpdatedResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SourceStateUpdatedResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto.Builder.class); } - private byte memoizedIsInitialized = -1; + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SourceStateUpdatedResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new 
SourceStateUpdatedResponseProto(input, extensionRegistry); + } + }; + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - unknownFields.writeTo(output); + getSerializedSize(); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -16280,33 +14423,25 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto) obj; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (29 * hash) + unknownFields.hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -16330,59 +14465,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.So } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -16390,16 +14512,14 @@ protected Builder newBuilderForType( * Protobuf type {@code SourceStateUpdatedResponseProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:SourceStateUpdatedResponseProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SourceStateUpdatedResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SourceStateUpdatedResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -16412,33 +14532,36 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SourceStateUpdatedResponseProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto result = buildPartial(); if (!result.isInitialized()) { @@ -16447,46 +14570,12 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceSta return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto(this); onBuilt(); return result; } - @java.lang.Override - public 
Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto)other); @@ -16498,17 +14587,14 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto other) { if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -16518,7 +14604,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -16526,71 +14612,28 @@ public Builder mergeFrom( } return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:SourceStateUpdatedResponseProto) } - // @@protoc_insertion_point(class_scope:SourceStateUpdatedResponseProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - 
@java.lang.Override - public SourceStateUpdatedResponseProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SourceStateUpdatedResponseProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SourceStateUpdatedResponseProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new SourceStateUpdatedResponseProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:SourceStateUpdatedResponseProto) } - public interface QueryCompleteRequestProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:QueryCompleteRequestProto) - com.google.protobuf.MessageOrBuilder { + public interface QueryCompleteRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional .QueryIdentifierProto query_identifier = 1; /** * optional .QueryIdentifierProto query_identifier = 1; - * @return Whether the queryIdentifier field is set. */ boolean hasQueryIdentifier(); /** * optional .QueryIdentifierProto query_identifier = 1; - * @return The queryIdentifier. */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier(); /** @@ -16598,14 +14641,13 @@ public interface QueryCompleteRequestProtoOrBuilder extends */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder(); + // optional int64 delete_delay = 2 [default = 0]; /** * optional int64 delete_delay = 2 [default = 0]; - * @return Whether the deleteDelay field is set. */ boolean hasDeleteDelay(); /** * optional int64 delete_delay = 2 [default = 0]; - * @return The deleteDelay. */ long getDeleteDelay(); } @@ -16613,37 +14655,35 @@ public interface QueryCompleteRequestProtoOrBuilder extends * Protobuf type {@code QueryCompleteRequestProto} */ public static final class QueryCompleteRequestProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:QueryCompleteRequestProto) - QueryCompleteRequestProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements QueryCompleteRequestProtoOrBuilder { // Use QueryCompleteRequestProto.newBuilder() to construct. 
- private QueryCompleteRequestProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private QueryCompleteRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private QueryCompleteRequestProto() { + private QueryCompleteRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final QueryCompleteRequestProto defaultInstance; + public static QueryCompleteRequestProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new QueryCompleteRequestProto(); + public QueryCompleteRequestProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private QueryCompleteRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -16655,9 +14695,16 @@ private QueryCompleteRequestProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = queryIdentifier_.toBuilder(); } queryIdentifier_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.PARSER, extensionRegistry); @@ -16673,22 +14720,13 @@ private QueryCompleteRequestProto( deleteDelay_ = input.readInt64(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -16699,102 +14737,118 @@ private QueryCompleteRequestProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.class, 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public QueryCompleteRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new QueryCompleteRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional .QueryIdentifierProto query_identifier = 1; public static final int QUERY_IDENTIFIER_FIELD_NUMBER = 1; private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_; /** * optional .QueryIdentifierProto query_identifier = 1; - * @return Whether the queryIdentifier field is set. */ - @java.lang.Override public boolean hasQueryIdentifier() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .QueryIdentifierProto query_identifier = 1; - * @return The queryIdentifier. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() { - return queryIdentifier_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } /** * optional .QueryIdentifierProto query_identifier = 1; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() { - return queryIdentifier_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } + // optional int64 delete_delay = 2 [default = 0]; public static final int DELETE_DELAY_FIELD_NUMBER = 2; private long deleteDelay_; /** * optional int64 delete_delay = 2 [default = 0]; - * @return Whether the deleteDelay field is set. */ - @java.lang.Override public boolean hasDeleteDelay() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional int64 delete_delay = 2 [default = 0]; - * @return The deleteDelay. 
*/ - @java.lang.Override public long getDeleteDelay() { return deleteDelay_; } + private void initFields() { + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); + deleteDelay_ = 0L; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - output.writeMessage(1, getQueryIdentifier()); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, queryIdentifier_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeInt64(2, deleteDelay_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, getQueryIdentifier()); + .computeMessageSize(1, queryIdentifier_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(2, deleteDelay_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -16805,52 +14859,43 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto) obj; - if (hasQueryIdentifier() != other.hasQueryIdentifier()) return false; + boolean result = true; + result = result && (hasQueryIdentifier() == other.hasQueryIdentifier()); if (hasQueryIdentifier()) { - if (!getQueryIdentifier() - .equals(other.getQueryIdentifier())) return false; + result = result && getQueryIdentifier() + .equals(other.getQueryIdentifier()); } - if (hasDeleteDelay() != other.hasDeleteDelay()) return false; + result = result && (hasDeleteDelay() == other.hasDeleteDelay()); if (hasDeleteDelay()) { - if (getDeleteDelay() - != other.getDeleteDelay()) return false; + result = result && (getDeleteDelay() + == other.getDeleteDelay()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if 
(hasQueryIdentifier()) { hash = (37 * hash) + QUERY_IDENTIFIER_FIELD_NUMBER; hash = (53 * hash) + getQueryIdentifier().hashCode(); } if (hasDeleteDelay()) { hash = (37 * hash) + DELETE_DELAY_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getDeleteDelay()); + hash = (53 * hash) + hashLong(getDeleteDelay()); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -16874,59 +14919,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Qu } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + 
public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -16934,16 +14966,14 @@ protected Builder newBuilderForType( * Protobuf type {@code QueryCompleteRequestProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:QueryCompleteRequestProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -16956,21 +14986,23 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getQueryIdentifierFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); if (queryIdentifierBuilder_ == null) { - queryIdentifier_ = null; + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); } else { queryIdentifierBuilder_.clear(); } @@ -16980,18 +15012,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteRequestProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto.getDefaultInstance(); } - @java.lang.Override public 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto result = buildPartial(); if (!result.isInitialized()) { @@ -17000,61 +15033,27 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryComp return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { - if (queryIdentifierBuilder_ == null) { - result.queryIdentifier_ = queryIdentifier_; - } else { - result.queryIdentifier_ = queryIdentifierBuilder_.build(); - } + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (((from_bitField0_ & 0x00000002) != 0)) { - result.deleteDelay_ = deleteDelay_; + if (queryIdentifierBuilder_ == null) { + result.queryIdentifier_ = queryIdentifier_; + } else { + result.queryIdentifier_ = queryIdentifierBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } + result.deleteDelay_ = deleteDelay_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto)other); @@ -17072,17 +15071,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasDeleteDelay()) { setDeleteDelay(other.getDeleteDelay()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -17092,7 +15088,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = 
(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -17102,23 +15098,22 @@ public Builder mergeFrom( } private int bitField0_; - private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_; - private com.google.protobuf.SingleFieldBuilderV3< + // optional .QueryIdentifierProto query_identifier = 1; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> queryIdentifierBuilder_; /** * optional .QueryIdentifierProto query_identifier = 1; - * @return Whether the queryIdentifier field is set. */ public boolean hasQueryIdentifier() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .QueryIdentifierProto query_identifier = 1; - * @return The queryIdentifier. */ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() { if (queryIdentifierBuilder_ == null) { - return queryIdentifier_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } else { return queryIdentifierBuilder_.getMessage(); } @@ -17158,8 +15153,7 @@ public Builder setQueryIdentifier( */ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) { if (queryIdentifierBuilder_ == null) { - if (((bitField0_ & 0x00000001) != 0) && - queryIdentifier_ != null && + if (((bitField0_ & 0x00000001) == 0x00000001) && queryIdentifier_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance()) { queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder(queryIdentifier_).mergeFrom(value).buildPartial(); @@ -17178,7 +15172,7 @@ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapD */ public Builder clearQueryIdentifier() { if (queryIdentifierBuilder_ == null) { - queryIdentifier_ = null; + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); onChanged(); } else { queryIdentifierBuilder_.clear(); @@ -17201,20 +15195,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIden if (queryIdentifierBuilder_ != null) { return queryIdentifierBuilder_.getMessageOrBuilder(); } else { - return queryIdentifier_ == null ? 
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } } /** * optional .QueryIdentifierProto query_identifier = 1; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> getQueryIdentifierFieldBuilder() { if (queryIdentifierBuilder_ == null) { - queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>( - getQueryIdentifier(), + queryIdentifier_, getParentForChildren(), isClean()); queryIdentifier_ = null; @@ -17222,27 +15215,22 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIden return queryIdentifierBuilder_; } + // optional int64 delete_delay = 2 [default = 0]; private long deleteDelay_ ; /** * optional int64 delete_delay = 2 [default = 0]; - * @return Whether the deleteDelay field is set. */ - @java.lang.Override public boolean hasDeleteDelay() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional int64 delete_delay = 2 [default = 0]; - * @return The deleteDelay. */ - @java.lang.Override public long getDeleteDelay() { return deleteDelay_; } /** * optional int64 delete_delay = 2 [default = 0]; - * @param value The deleteDelay to set. - * @return This builder for chaining. */ public Builder setDeleteDelay(long value) { bitField0_ |= 0x00000002; @@ -17252,7 +15240,6 @@ public Builder setDeleteDelay(long value) { } /** * optional int64 delete_delay = 2 [default = 0]; - * @return This builder for chaining. 
*/ public Builder clearDeleteDelay() { bitField0_ = (bitField0_ & ~0x00000002); @@ -17260,98 +15247,54 @@ public Builder clearDeleteDelay() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:QueryCompleteRequestProto) } - // @@protoc_insertion_point(class_scope:QueryCompleteRequestProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public QueryCompleteRequestProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new QueryCompleteRequestProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteRequestProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new QueryCompleteRequestProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:QueryCompleteRequestProto) } - public interface QueryCompleteResponseProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:QueryCompleteResponseProto) - com.google.protobuf.MessageOrBuilder { + public interface QueryCompleteResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code QueryCompleteResponseProto} */ public static final class QueryCompleteResponseProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:QueryCompleteResponseProto) - QueryCompleteResponseProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements QueryCompleteResponseProtoOrBuilder { // Use QueryCompleteResponseProto.newBuilder() to construct. 
- private QueryCompleteResponseProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private QueryCompleteResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private QueryCompleteResponseProto() { + private QueryCompleteResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final QueryCompleteResponseProto defaultInstance; + public static QueryCompleteResponseProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new QueryCompleteResponseProto(); + public QueryCompleteResponseProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private QueryCompleteResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -17363,8 +15306,8 @@ private QueryCompleteResponseProto( done = true; break; default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { done = true; } break; @@ -17373,11 +15316,9 @@ private QueryCompleteResponseProto( } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -17388,42 +15329,63 @@ private QueryCompleteResponseProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.Builder.class); } - private byte memoizedIsInitialized = -1; + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public QueryCompleteResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new QueryCompleteResponseProto(input, extensionRegistry); + } + }; + @java.lang.Override + 
public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - unknownFields.writeTo(output); + getSerializedSize(); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -17434,33 +15396,25 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto) obj; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (29 * hash) + unknownFields.hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -17484,59 +15438,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Qu } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -17544,16 +15485,14 @@ protected Builder newBuilderForType( * Protobuf type {@code QueryCompleteResponseProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:QueryCompleteResponseProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -17566,33 +15505,36 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_QueryCompleteResponseProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto result = buildPartial(); if (!result.isInitialized()) { @@ -17601,46 +15543,12 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryComp return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto(this); onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - 
public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto)other); @@ -17652,17 +15560,14 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto other) { if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -17672,7 +15577,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -17680,71 +15585,28 @@ public Builder mergeFrom( } return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - - // @@protoc_insertion_point(builder_scope:QueryCompleteResponseProto) - } - - // @@protoc_insertion_point(class_scope:QueryCompleteResponseProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto DEFAULT_INSTANCE; - static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public QueryCompleteResponseProto parsePartialFrom( - 
com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new QueryCompleteResponseProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; + + // @@protoc_insertion_point(builder_scope:QueryCompleteResponseProto) } - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryCompleteResponseProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + static { + defaultInstance = new QueryCompleteResponseProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:QueryCompleteResponseProto) } - public interface TerminateFragmentRequestProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:TerminateFragmentRequestProto) - com.google.protobuf.MessageOrBuilder { + public interface TerminateFragmentRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional .QueryIdentifierProto query_identifier = 1; /** * optional .QueryIdentifierProto query_identifier = 1; - * @return Whether the queryIdentifier field is set. */ boolean hasQueryIdentifier(); /** * optional .QueryIdentifierProto query_identifier = 1; - * @return The queryIdentifier. */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier(); /** @@ -17752,19 +15614,17 @@ public interface TerminateFragmentRequestProtoOrBuilder extends */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder(); + // optional string fragment_identifier_string = 2; /** * optional string fragment_identifier_string = 2; - * @return Whether the fragmentIdentifierString field is set. */ boolean hasFragmentIdentifierString(); /** * optional string fragment_identifier_string = 2; - * @return The fragmentIdentifierString. */ java.lang.String getFragmentIdentifierString(); /** * optional string fragment_identifier_string = 2; - * @return The bytes for fragmentIdentifierString. */ com.google.protobuf.ByteString getFragmentIdentifierStringBytes(); @@ -17773,38 +15633,35 @@ public interface TerminateFragmentRequestProtoOrBuilder extends * Protobuf type {@code TerminateFragmentRequestProto} */ public static final class TerminateFragmentRequestProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:TerminateFragmentRequestProto) - TerminateFragmentRequestProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements TerminateFragmentRequestProtoOrBuilder { // Use TerminateFragmentRequestProto.newBuilder() to construct. 
- private TerminateFragmentRequestProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private TerminateFragmentRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private TerminateFragmentRequestProto() { - fragmentIdentifierString_ = ""; + private TerminateFragmentRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TerminateFragmentRequestProto defaultInstance; + public static TerminateFragmentRequestProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new TerminateFragmentRequestProto(); + public TerminateFragmentRequestProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private TerminateFragmentRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -17816,9 +15673,16 @@ private TerminateFragmentRequestProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = queryIdentifier_.toBuilder(); } queryIdentifier_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.PARSER, extensionRegistry); @@ -17830,27 +15694,17 @@ private TerminateFragmentRequestProto( break; } case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; - fragmentIdentifierString_ = bs; - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } + fragmentIdentifierString_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -17861,56 +15715,63 @@ private TerminateFragmentRequestProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TerminateFragmentRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TerminateFragmentRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional .QueryIdentifierProto query_identifier = 1; public static final int QUERY_IDENTIFIER_FIELD_NUMBER = 1; private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_; /** * optional .QueryIdentifierProto query_identifier = 1; - * @return Whether the queryIdentifier field is set. */ - @java.lang.Override public boolean hasQueryIdentifier() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .QueryIdentifierProto query_identifier = 1; - * @return The queryIdentifier. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() { - return queryIdentifier_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } /** * optional .QueryIdentifierProto query_identifier = 1; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() { - return queryIdentifier_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } + // optional string fragment_identifier_string = 2; public static final int FRAGMENT_IDENTIFIER_STRING_FIELD_NUMBER = 2; - private volatile java.lang.Object fragmentIdentifierString_; + private java.lang.Object fragmentIdentifierString_; /** * optional string fragment_identifier_string = 2; - * @return Whether the fragmentIdentifierString field is set. */ - @java.lang.Override public boolean hasFragmentIdentifierString() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string fragment_identifier_string = 2; - * @return The fragmentIdentifierString. */ - @java.lang.Override public java.lang.String getFragmentIdentifierString() { java.lang.Object ref = fragmentIdentifierString_; if (ref instanceof java.lang.String) { @@ -17927,9 +15788,7 @@ public java.lang.String getFragmentIdentifierString() { } /** * optional string fragment_identifier_string = 2; - * @return The bytes for fragmentIdentifierString. 
*/ - @java.lang.Override public com.google.protobuf.ByteString getFragmentIdentifierStringBytes() { java.lang.Object ref = fragmentIdentifierString_; @@ -17944,47 +15803,57 @@ public java.lang.String getFragmentIdentifierString() { } } + private void initFields() { + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); + fragmentIdentifierString_ = ""; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - output.writeMessage(1, getQueryIdentifier()); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, queryIdentifier_); } - if (((bitField0_ & 0x00000002) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, fragmentIdentifierString_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getFragmentIdentifierStringBytes()); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, getQueryIdentifier()); + .computeMessageSize(1, queryIdentifier_); } - if (((bitField0_ & 0x00000002) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, fragmentIdentifierString_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getFragmentIdentifierStringBytes()); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -17995,27 +15864,30 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto) obj; - if (hasQueryIdentifier() != other.hasQueryIdentifier()) return false; + boolean result = true; + result = result && (hasQueryIdentifier() == other.hasQueryIdentifier()); if (hasQueryIdentifier()) { - if (!getQueryIdentifier() - .equals(other.getQueryIdentifier())) return false; + result = result && getQueryIdentifier() + .equals(other.getQueryIdentifier()); } - if (hasFragmentIdentifierString() != other.hasFragmentIdentifierString()) return false; + result = result && (hasFragmentIdentifierString() == other.hasFragmentIdentifierString()); if (hasFragmentIdentifierString()) { - if (!getFragmentIdentifierString() - .equals(other.getFragmentIdentifierString())) return false; + 
result = result && getFragmentIdentifierString() + .equals(other.getFragmentIdentifierString()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasQueryIdentifier()) { hash = (37 * hash) + QUERY_IDENTIFIER_FIELD_NUMBER; hash = (53 * hash) + getQueryIdentifier().hashCode(); @@ -18024,22 +15896,11 @@ public int hashCode() { hash = (37 * hash) + FRAGMENT_IDENTIFIER_STRING_FIELD_NUMBER; hash = (53 * hash) + getFragmentIdentifierString().hashCode(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -18063,59 +15924,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Te } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return 
com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -18123,16 +15971,14 @@ protected Builder newBuilderForType( * Protobuf type {@code TerminateFragmentRequestProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:TerminateFragmentRequestProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -18145,21 +15991,23 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getQueryIdentifierFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); if (queryIdentifierBuilder_ == null) { - queryIdentifier_ = null; + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); } else { queryIdentifierBuilder_.clear(); } @@ -18169,18 +16017,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { 
+ return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentRequestProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto result = buildPartial(); if (!result.isInitialized()) { @@ -18189,20 +16038,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Terminate return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { - if (queryIdentifierBuilder_ == null) { - result.queryIdentifier_ = queryIdentifier_; - } else { - result.queryIdentifier_ = queryIdentifierBuilder_.build(); - } + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (((from_bitField0_ & 0x00000002) != 0)) { + if (queryIdentifierBuilder_ == null) { + result.queryIdentifier_ = queryIdentifier_; + } else { + result.queryIdentifier_ = queryIdentifierBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.fragmentIdentifierString_ = fragmentIdentifierString_; @@ -18211,39 +16059,6 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Terminate return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto)other); @@ -18263,17 +16078,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc fragmentIdentifierString_ = 
other.fragmentIdentifierString_; onChanged(); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -18283,7 +16095,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -18293,23 +16105,22 @@ public Builder mergeFrom( } private int bitField0_; - private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_; - private com.google.protobuf.SingleFieldBuilderV3< + // optional .QueryIdentifierProto query_identifier = 1; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> queryIdentifierBuilder_; /** * optional .QueryIdentifierProto query_identifier = 1; - * @return Whether the queryIdentifier field is set. */ public boolean hasQueryIdentifier() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .QueryIdentifierProto query_identifier = 1; - * @return The queryIdentifier. */ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() { if (queryIdentifierBuilder_ == null) { - return queryIdentifier_ == null ? 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } else { return queryIdentifierBuilder_.getMessage(); } @@ -18349,8 +16160,7 @@ public Builder setQueryIdentifier( */ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) { if (queryIdentifierBuilder_ == null) { - if (((bitField0_ & 0x00000001) != 0) && - queryIdentifier_ != null && + if (((bitField0_ & 0x00000001) == 0x00000001) && queryIdentifier_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance()) { queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder(queryIdentifier_).mergeFrom(value).buildPartial(); @@ -18369,7 +16179,7 @@ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapD */ public Builder clearQueryIdentifier() { if (queryIdentifierBuilder_ == null) { - queryIdentifier_ = null; + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); onChanged(); } else { queryIdentifierBuilder_.clear(); @@ -18392,20 +16202,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIden if (queryIdentifierBuilder_ != null) { return queryIdentifierBuilder_.getMessageOrBuilder(); } else { - return queryIdentifier_ == null ? - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } } /** * optional .QueryIdentifierProto query_identifier = 1; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> getQueryIdentifierFieldBuilder() { if (queryIdentifierBuilder_ == null) { - queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>( - getQueryIdentifier(), + queryIdentifier_, getParentForChildren(), isClean()); queryIdentifier_ = null; @@ -18413,27 +16222,23 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIden return queryIdentifierBuilder_; } + // optional string fragment_identifier_string = 2; private java.lang.Object fragmentIdentifierString_ = ""; /** * optional string fragment_identifier_string = 2; - * @return Whether the fragmentIdentifierString field is set. */ public boolean hasFragmentIdentifierString() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string fragment_identifier_string = 2; - * @return The fragmentIdentifierString. 
*/ public java.lang.String getFragmentIdentifierString() { java.lang.Object ref = fragmentIdentifierString_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - fragmentIdentifierString_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + fragmentIdentifierString_ = s; return s; } else { return (java.lang.String) ref; @@ -18441,7 +16246,6 @@ public java.lang.String getFragmentIdentifierString() { } /** * optional string fragment_identifier_string = 2; - * @return The bytes for fragmentIdentifierString. */ public com.google.protobuf.ByteString getFragmentIdentifierStringBytes() { @@ -18458,8 +16262,6 @@ public java.lang.String getFragmentIdentifierString() { } /** * optional string fragment_identifier_string = 2; - * @param value The fragmentIdentifierString to set. - * @return This builder for chaining. */ public Builder setFragmentIdentifierString( java.lang.String value) { @@ -18473,7 +16275,6 @@ public Builder setFragmentIdentifierString( } /** * optional string fragment_identifier_string = 2; - * @return This builder for chaining. */ public Builder clearFragmentIdentifierString() { bitField0_ = (bitField0_ & ~0x00000002); @@ -18483,8 +16284,6 @@ public Builder clearFragmentIdentifierString() { } /** * optional string fragment_identifier_string = 2; - * @param value The bytes for fragmentIdentifierString to set. - * @return This builder for chaining. */ public Builder setFragmentIdentifierStringBytes( com.google.protobuf.ByteString value) { @@ -18496,98 +16295,54 @@ public Builder setFragmentIdentifierStringBytes( onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:TerminateFragmentRequestProto) } - // @@protoc_insertion_point(class_scope:TerminateFragmentRequestProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public TerminateFragmentRequestProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new TerminateFragmentRequestProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentRequestProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new 
TerminateFragmentRequestProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:TerminateFragmentRequestProto) } - public interface TerminateFragmentResponseProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:TerminateFragmentResponseProto) - com.google.protobuf.MessageOrBuilder { + public interface TerminateFragmentResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code TerminateFragmentResponseProto} */ public static final class TerminateFragmentResponseProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:TerminateFragmentResponseProto) - TerminateFragmentResponseProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements TerminateFragmentResponseProtoOrBuilder { // Use TerminateFragmentResponseProto.newBuilder() to construct. - private TerminateFragmentResponseProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private TerminateFragmentResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private TerminateFragmentResponseProto() { + private TerminateFragmentResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TerminateFragmentResponseProto defaultInstance; + public static TerminateFragmentResponseProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new TerminateFragmentResponseProto(); + public TerminateFragmentResponseProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private TerminateFragmentResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -18599,8 +16354,8 @@ private TerminateFragmentResponseProto( done = true; break; default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { done = true; } break; @@ -18609,11 +16364,9 @@ private TerminateFragmentResponseProto( } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -18624,42 +16377,63 @@ private TerminateFragmentResponseProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentResponseProto_descriptor; } - @java.lang.Override - 
protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.Builder.class); } - private byte memoizedIsInitialized = -1; + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TerminateFragmentResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TerminateFragmentResponseProto(input, extensionRegistry); + } + }; + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - unknownFields.writeTo(output); + getSerializedSize(); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -18670,33 +16444,25 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto) obj; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (29 * hash) + unknownFields.hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto 
parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -18720,59 +16486,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Te } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -18780,16 +16533,14 @@ protected Builder newBuilderForType( * Protobuf type {@code TerminateFragmentResponseProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:TerminateFragmentResponseProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -18802,33 +16553,36 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TerminateFragmentResponseProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto result = buildPartial(); if (!result.isInitialized()) { @@ -18837,46 +16591,12 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Terminate return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto(this); onBuilt(); return result; } - @java.lang.Override - public Builder clone() 
{ - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto)other); @@ -18888,17 +16608,14 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto other) { if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -18908,7 +16625,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -18916,71 +16633,28 @@ public Builder mergeFrom( } return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:TerminateFragmentResponseProto) } - // @@protoc_insertion_point(class_scope:TerminateFragmentResponseProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - 
public TerminateFragmentResponseProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new TerminateFragmentResponseProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TerminateFragmentResponseProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new TerminateFragmentResponseProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:TerminateFragmentResponseProto) } - public interface UpdateFragmentRequestProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:UpdateFragmentRequestProto) - com.google.protobuf.MessageOrBuilder { + public interface UpdateFragmentRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional .QueryIdentifierProto query_identifier = 1; /** * optional .QueryIdentifierProto query_identifier = 1; - * @return Whether the queryIdentifier field is set. */ boolean hasQueryIdentifier(); /** * optional .QueryIdentifierProto query_identifier = 1; - * @return The queryIdentifier. */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier(); /** @@ -18988,31 +16662,28 @@ public interface UpdateFragmentRequestProtoOrBuilder extends */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder(); + // optional string fragment_identifier_string = 2; /** * optional string fragment_identifier_string = 2; - * @return Whether the fragmentIdentifierString field is set. */ boolean hasFragmentIdentifierString(); /** * optional string fragment_identifier_string = 2; - * @return The fragmentIdentifierString. */ java.lang.String getFragmentIdentifierString(); /** * optional string fragment_identifier_string = 2; - * @return The bytes for fragmentIdentifierString. */ com.google.protobuf.ByteString getFragmentIdentifierStringBytes(); + // optional bool is_guaranteed = 3; /** * optional bool is_guaranteed = 3; - * @return Whether the isGuaranteed field is set. */ boolean hasIsGuaranteed(); /** * optional bool is_guaranteed = 3; - * @return The isGuaranteed. */ boolean getIsGuaranteed(); } @@ -19020,38 +16691,35 @@ public interface UpdateFragmentRequestProtoOrBuilder extends * Protobuf type {@code UpdateFragmentRequestProto} */ public static final class UpdateFragmentRequestProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:UpdateFragmentRequestProto) - UpdateFragmentRequestProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements UpdateFragmentRequestProtoOrBuilder { // Use UpdateFragmentRequestProto.newBuilder() to construct. 
- private UpdateFragmentRequestProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private UpdateFragmentRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private UpdateFragmentRequestProto() { - fragmentIdentifierString_ = ""; + private UpdateFragmentRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final UpdateFragmentRequestProto defaultInstance; + public static UpdateFragmentRequestProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new UpdateFragmentRequestProto(); + public UpdateFragmentRequestProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private UpdateFragmentRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -19063,9 +16731,16 @@ private UpdateFragmentRequestProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = queryIdentifier_.toBuilder(); } queryIdentifier_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.PARSER, extensionRegistry); @@ -19077,9 +16752,8 @@ private UpdateFragmentRequestProto( break; } case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; - fragmentIdentifierString_ = bs; + fragmentIdentifierString_ = input.readBytes(); break; } case 24: { @@ -19087,22 +16761,13 @@ private UpdateFragmentRequestProto( isGuaranteed_ = input.readBool(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -19113,56 +16778,63 @@ private UpdateFragmentRequestProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UpdateFragmentRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable 
internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UpdateFragmentRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public UpdateFragmentRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new UpdateFragmentRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional .QueryIdentifierProto query_identifier = 1; public static final int QUERY_IDENTIFIER_FIELD_NUMBER = 1; private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_; /** * optional .QueryIdentifierProto query_identifier = 1; - * @return Whether the queryIdentifier field is set. */ - @java.lang.Override public boolean hasQueryIdentifier() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .QueryIdentifierProto query_identifier = 1; - * @return The queryIdentifier. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() { - return queryIdentifier_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } /** * optional .QueryIdentifierProto query_identifier = 1; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder getQueryIdentifierOrBuilder() { - return queryIdentifier_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } + // optional string fragment_identifier_string = 2; public static final int FRAGMENT_IDENTIFIER_STRING_FIELD_NUMBER = 2; - private volatile java.lang.Object fragmentIdentifierString_; + private java.lang.Object fragmentIdentifierString_; /** * optional string fragment_identifier_string = 2; - * @return Whether the fragmentIdentifierString field is set. */ - @java.lang.Override public boolean hasFragmentIdentifierString() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string fragment_identifier_string = 2; - * @return The fragmentIdentifierString. */ - @java.lang.Override public java.lang.String getFragmentIdentifierString() { java.lang.Object ref = fragmentIdentifierString_; if (ref instanceof java.lang.String) { @@ -19179,9 +16851,7 @@ public java.lang.String getFragmentIdentifierString() { } /** * optional string fragment_identifier_string = 2; - * @return The bytes for fragmentIdentifierString. 
*/ - @java.lang.Override public com.google.protobuf.ByteString getFragmentIdentifierStringBytes() { java.lang.Object ref = fragmentIdentifierString_; @@ -19196,73 +16866,81 @@ public java.lang.String getFragmentIdentifierString() { } } + // optional bool is_guaranteed = 3; public static final int IS_GUARANTEED_FIELD_NUMBER = 3; private boolean isGuaranteed_; /** * optional bool is_guaranteed = 3; - * @return Whether the isGuaranteed field is set. */ - @java.lang.Override public boolean hasIsGuaranteed() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional bool is_guaranteed = 3; - * @return The isGuaranteed. */ - @java.lang.Override public boolean getIsGuaranteed() { return isGuaranteed_; } + private void initFields() { + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); + fragmentIdentifierString_ = ""; + isGuaranteed_ = false; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - output.writeMessage(1, getQueryIdentifier()); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, queryIdentifier_); } - if (((bitField0_ & 0x00000002) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, fragmentIdentifierString_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getFragmentIdentifierStringBytes()); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeBool(3, isGuaranteed_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, getQueryIdentifier()); + .computeMessageSize(1, queryIdentifier_); } - if (((bitField0_ & 0x00000002) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, fragmentIdentifierString_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getFragmentIdentifierStringBytes()); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(3, isGuaranteed_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -19273,32 +16951,35 @@ public boolean equals(final java.lang.Object obj) { } 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto) obj; - if (hasQueryIdentifier() != other.hasQueryIdentifier()) return false; + boolean result = true; + result = result && (hasQueryIdentifier() == other.hasQueryIdentifier()); if (hasQueryIdentifier()) { - if (!getQueryIdentifier() - .equals(other.getQueryIdentifier())) return false; + result = result && getQueryIdentifier() + .equals(other.getQueryIdentifier()); } - if (hasFragmentIdentifierString() != other.hasFragmentIdentifierString()) return false; + result = result && (hasFragmentIdentifierString() == other.hasFragmentIdentifierString()); if (hasFragmentIdentifierString()) { - if (!getFragmentIdentifierString() - .equals(other.getFragmentIdentifierString())) return false; + result = result && getFragmentIdentifierString() + .equals(other.getFragmentIdentifierString()); } - if (hasIsGuaranteed() != other.hasIsGuaranteed()) return false; + result = result && (hasIsGuaranteed() == other.hasIsGuaranteed()); if (hasIsGuaranteed()) { - if (getIsGuaranteed() - != other.getIsGuaranteed()) return false; + result = result && (getIsGuaranteed() + == other.getIsGuaranteed()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasQueryIdentifier()) { hash = (37 * hash) + QUERY_IDENTIFIER_FIELD_NUMBER; hash = (53 * hash) + getQueryIdentifier().hashCode(); @@ -19309,25 +16990,13 @@ public int hashCode() { } if (hasIsGuaranteed()) { hash = (37 * hash) + IS_GUARANTEED_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( - getIsGuaranteed()); + hash = (53 * hash) + hashBoolean(getIsGuaranteed()); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -19351,59 +17020,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Up } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -19411,16 +17067,14 @@ protected Builder newBuilderForType( * Protobuf type {@code UpdateFragmentRequestProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:UpdateFragmentRequestProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UpdateFragmentRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UpdateFragmentRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -19433,21 +17087,23 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getQueryIdentifierFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); if (queryIdentifierBuilder_ == null) { - queryIdentifier_ = null; + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); } else { queryIdentifierBuilder_.clear(); } @@ -19459,18 +17115,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UpdateFragmentRequestProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto result = buildPartial(); if (!result.isInitialized()) { @@ -19479,65 +17136,31 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFra return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto buildPartial() { 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { - if (queryIdentifierBuilder_ == null) { - result.queryIdentifier_ = queryIdentifier_; - } else { - result.queryIdentifier_ = queryIdentifierBuilder_.build(); - } + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (((from_bitField0_ & 0x00000002) != 0)) { + if (queryIdentifierBuilder_ == null) { + result.queryIdentifier_ = queryIdentifier_; + } else { + result.queryIdentifier_ = queryIdentifierBuilder_.build(); + } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.fragmentIdentifierString_ = fragmentIdentifierString_; - if (((from_bitField0_ & 0x00000004) != 0)) { - result.isGuaranteed_ = isGuaranteed_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } + result.isGuaranteed_ = isGuaranteed_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto)other); @@ -19560,17 +17183,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasIsGuaranteed()) { setIsGuaranteed(other.getIsGuaranteed()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -19580,7 +17200,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -19590,23 +17210,22 @@ public Builder mergeFrom( } private int bitField0_; - private 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_; - private com.google.protobuf.SingleFieldBuilderV3< + // optional .QueryIdentifierProto query_identifier = 1; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> queryIdentifierBuilder_; /** * optional .QueryIdentifierProto query_identifier = 1; - * @return Whether the queryIdentifier field is set. */ public boolean hasQueryIdentifier() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .QueryIdentifierProto query_identifier = 1; - * @return The queryIdentifier. */ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto getQueryIdentifier() { if (queryIdentifierBuilder_ == null) { - return queryIdentifier_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } else { return queryIdentifierBuilder_.getMessage(); } @@ -19646,8 +17265,7 @@ public Builder setQueryIdentifier( */ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto value) { if (queryIdentifierBuilder_ == null) { - if (((bitField0_ & 0x00000001) != 0) && - queryIdentifier_ != null && + if (((bitField0_ & 0x00000001) == 0x00000001) && queryIdentifier_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance()) { queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.newBuilder(queryIdentifier_).mergeFrom(value).buildPartial(); @@ -19666,7 +17284,7 @@ public Builder mergeQueryIdentifier(org.apache.hadoop.hive.llap.daemon.rpc.LlapD */ public Builder clearQueryIdentifier() { if (queryIdentifierBuilder_ == null) { - queryIdentifier_ = null; + queryIdentifier_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance(); onChanged(); } else { queryIdentifierBuilder_.clear(); @@ -19689,20 +17307,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIden if (queryIdentifierBuilder_ != null) { return queryIdentifierBuilder_.getMessageOrBuilder(); } else { - return queryIdentifier_ == null ? 
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.getDefaultInstance() : queryIdentifier_; + return queryIdentifier_; } } /** * optional .QueryIdentifierProto query_identifier = 1; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder> getQueryIdentifierFieldBuilder() { if (queryIdentifierBuilder_ == null) { - queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + queryIdentifierBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIdentifierProtoOrBuilder>( - getQueryIdentifier(), + queryIdentifier_, getParentForChildren(), isClean()); queryIdentifier_ = null; @@ -19710,27 +17327,23 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.QueryIden return queryIdentifierBuilder_; } + // optional string fragment_identifier_string = 2; private java.lang.Object fragmentIdentifierString_ = ""; /** * optional string fragment_identifier_string = 2; - * @return Whether the fragmentIdentifierString field is set. */ public boolean hasFragmentIdentifierString() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string fragment_identifier_string = 2; - * @return The fragmentIdentifierString. */ public java.lang.String getFragmentIdentifierString() { java.lang.Object ref = fragmentIdentifierString_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - fragmentIdentifierString_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + fragmentIdentifierString_ = s; return s; } else { return (java.lang.String) ref; @@ -19738,7 +17351,6 @@ public java.lang.String getFragmentIdentifierString() { } /** * optional string fragment_identifier_string = 2; - * @return The bytes for fragmentIdentifierString. */ public com.google.protobuf.ByteString getFragmentIdentifierStringBytes() { @@ -19755,8 +17367,6 @@ public java.lang.String getFragmentIdentifierString() { } /** * optional string fragment_identifier_string = 2; - * @param value The fragmentIdentifierString to set. - * @return This builder for chaining. */ public Builder setFragmentIdentifierString( java.lang.String value) { @@ -19770,7 +17380,6 @@ public Builder setFragmentIdentifierString( } /** * optional string fragment_identifier_string = 2; - * @return This builder for chaining. */ public Builder clearFragmentIdentifierString() { bitField0_ = (bitField0_ & ~0x00000002); @@ -19780,8 +17389,6 @@ public Builder clearFragmentIdentifierString() { } /** * optional string fragment_identifier_string = 2; - * @param value The bytes for fragmentIdentifierString to set. - * @return This builder for chaining. 
*/ public Builder setFragmentIdentifierStringBytes( com.google.protobuf.ByteString value) { @@ -19794,27 +17401,22 @@ public Builder setFragmentIdentifierStringBytes( return this; } + // optional bool is_guaranteed = 3; private boolean isGuaranteed_ ; /** * optional bool is_guaranteed = 3; - * @return Whether the isGuaranteed field is set. */ - @java.lang.Override public boolean hasIsGuaranteed() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional bool is_guaranteed = 3; - * @return The isGuaranteed. */ - @java.lang.Override public boolean getIsGuaranteed() { return isGuaranteed_; } /** * optional bool is_guaranteed = 3; - * @param value The isGuaranteed to set. - * @return This builder for chaining. */ public Builder setIsGuaranteed(boolean value) { bitField0_ |= 0x00000004; @@ -19824,7 +17426,6 @@ public Builder setIsGuaranteed(boolean value) { } /** * optional bool is_guaranteed = 3; - * @return This builder for chaining. */ public Builder clearIsGuaranteed() { bitField0_ = (bitField0_ & ~0x00000004); @@ -19832,82 +17433,38 @@ public Builder clearIsGuaranteed() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:UpdateFragmentRequestProto) } - // @@protoc_insertion_point(class_scope:UpdateFragmentRequestProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public UpdateFragmentRequestProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new UpdateFragmentRequestProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentRequestProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new UpdateFragmentRequestProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:UpdateFragmentRequestProto) } - public interface UpdateFragmentResponseProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:UpdateFragmentResponseProto) - com.google.protobuf.MessageOrBuilder { + public interface UpdateFragmentResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional bool result = 1; /** * optional bool result = 1; - * @return Whether the result field is set. */ boolean hasResult(); /** * optional bool result = 1; - * @return The result. 
*/ boolean getResult(); + // optional bool is_guaranteed = 2; /** * optional bool is_guaranteed = 2; - * @return Whether the isGuaranteed field is set. */ boolean hasIsGuaranteed(); /** * optional bool is_guaranteed = 2; - * @return The isGuaranteed. */ boolean getIsGuaranteed(); } @@ -19915,37 +17472,35 @@ public interface UpdateFragmentResponseProtoOrBuilder extends * Protobuf type {@code UpdateFragmentResponseProto} */ public static final class UpdateFragmentResponseProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:UpdateFragmentResponseProto) - UpdateFragmentResponseProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements UpdateFragmentResponseProtoOrBuilder { // Use UpdateFragmentResponseProto.newBuilder() to construct. - private UpdateFragmentResponseProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private UpdateFragmentResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private UpdateFragmentResponseProto() { + private UpdateFragmentResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final UpdateFragmentResponseProto defaultInstance; + public static UpdateFragmentResponseProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new UpdateFragmentResponseProto(); + public UpdateFragmentResponseProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private UpdateFragmentResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -19957,6 +17512,13 @@ private UpdateFragmentResponseProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 8: { bitField0_ |= 0x00000001; result_ = input.readBool(); @@ -19967,22 +17529,13 @@ private UpdateFragmentResponseProto( isGuaranteed_ = input.readBool(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -19993,95 +17546,112 @@ private UpdateFragmentResponseProto( return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UpdateFragmentResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UpdateFragmentResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public UpdateFragmentResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new UpdateFragmentResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional bool result = 1; public static final int RESULT_FIELD_NUMBER = 1; private boolean result_; /** * optional bool result = 1; - * @return Whether the result field is set. */ - @java.lang.Override public boolean hasResult() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional bool result = 1; - * @return The result. */ - @java.lang.Override public boolean getResult() { return result_; } + // optional bool is_guaranteed = 2; public static final int IS_GUARANTEED_FIELD_NUMBER = 2; private boolean isGuaranteed_; /** * optional bool is_guaranteed = 2; - * @return Whether the isGuaranteed field is set. */ - @java.lang.Override public boolean hasIsGuaranteed() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bool is_guaranteed = 2; - * @return The isGuaranteed. 
*/ - @java.lang.Override public boolean getIsGuaranteed() { return isGuaranteed_; } + private void initFields() { + result_ = false; + isGuaranteed_ = false; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBool(1, result_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBool(2, isGuaranteed_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(1, result_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(2, isGuaranteed_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -20092,53 +17662,43 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto) obj; - if (hasResult() != other.hasResult()) return false; + boolean result = true; + result = result && (hasResult() == other.hasResult()); if (hasResult()) { - if (getResult() - != other.getResult()) return false; + result = result && (getResult() + == other.getResult()); } - if (hasIsGuaranteed() != other.hasIsGuaranteed()) return false; + result = result && (hasIsGuaranteed() == other.hasIsGuaranteed()); if (hasIsGuaranteed()) { - if (getIsGuaranteed() - != other.getIsGuaranteed()) return false; + result = result && (getIsGuaranteed() + == other.getIsGuaranteed()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasResult()) { hash = (37 * hash) + RESULT_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( - getResult()); + hash = (53 * hash) + hashBoolean(getResult()); } if (hasIsGuaranteed()) { hash = (37 * hash) + IS_GUARANTEED_FIELD_NUMBER; - hash = (53 * hash) + 
com.google.protobuf.Internal.hashBoolean( - getIsGuaranteed()); + hash = (53 * hash) + hashBoolean(getIsGuaranteed()); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -20162,59 +17722,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Up } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - 
} public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -20222,16 +17769,14 @@ protected Builder newBuilderForType( * Protobuf type {@code UpdateFragmentResponseProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:UpdateFragmentResponseProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UpdateFragmentResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UpdateFragmentResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -20244,16 +17789,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); result_ = false; @@ -20263,18 +17810,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_UpdateFragmentResponseProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto result = buildPartial(); if (!result.isInitialized()) { @@ -20283,57 +17831,23 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFra return result; } - @java.lang.Override public 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { - result.result_ = result_; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (((from_bitField0_ & 0x00000002) != 0)) { - result.isGuaranteed_ = isGuaranteed_; + result.result_ = result_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } + result.isGuaranteed_ = isGuaranteed_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto)other); @@ -20351,17 +17865,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasIsGuaranteed()) { setIsGuaranteed(other.getIsGuaranteed()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -20371,7 +17882,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -20381,27 +17892,22 @@ public Builder mergeFrom( } private int bitField0_; + // optional bool result = 1; private boolean result_ ; /** * optional bool result = 1; - * @return Whether the result field is set. */ - @java.lang.Override public boolean hasResult() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional bool result = 1; - * @return The result. 
*/ - @java.lang.Override public boolean getResult() { return result_; } /** * optional bool result = 1; - * @param value The result to set. - * @return This builder for chaining. */ public Builder setResult(boolean value) { bitField0_ |= 0x00000001; @@ -20411,7 +17917,6 @@ public Builder setResult(boolean value) { } /** * optional bool result = 1; - * @return This builder for chaining. */ public Builder clearResult() { bitField0_ = (bitField0_ & ~0x00000001); @@ -20420,27 +17925,22 @@ public Builder clearResult() { return this; } + // optional bool is_guaranteed = 2; private boolean isGuaranteed_ ; /** * optional bool is_guaranteed = 2; - * @return Whether the isGuaranteed field is set. */ - @java.lang.Override public boolean hasIsGuaranteed() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bool is_guaranteed = 2; - * @return The isGuaranteed. */ - @java.lang.Override public boolean getIsGuaranteed() { return isGuaranteed_; } /** * optional bool is_guaranteed = 2; - * @param value The isGuaranteed to set. - * @return This builder for chaining. */ public Builder setIsGuaranteed(boolean value) { bitField0_ |= 0x00000002; @@ -20450,7 +17950,6 @@ public Builder setIsGuaranteed(boolean value) { } /** * optional bool is_guaranteed = 2; - * @return This builder for chaining. */ public Builder clearIsGuaranteed() { bitField0_ = (bitField0_ & ~0x00000002); @@ -20458,76 +17957,32 @@ public Builder clearIsGuaranteed() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:UpdateFragmentResponseProto) } - // @@protoc_insertion_point(class_scope:UpdateFragmentResponseProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public UpdateFragmentResponseProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new UpdateFragmentResponseProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.UpdateFragmentResponseProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new UpdateFragmentResponseProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:UpdateFragmentResponseProto) } - public interface GetTokenRequestProtoOrBuilder extends - // 
@@protoc_insertion_point(interface_extends:GetTokenRequestProto) - com.google.protobuf.MessageOrBuilder { + public interface GetTokenRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional string app_id = 1; /** * optional string app_id = 1; - * @return Whether the appId field is set. */ boolean hasAppId(); /** * optional string app_id = 1; - * @return The appId. */ java.lang.String getAppId(); /** * optional string app_id = 1; - * @return The bytes for appId. */ com.google.protobuf.ByteString getAppIdBytes(); @@ -20536,38 +17991,35 @@ public interface GetTokenRequestProtoOrBuilder extends * Protobuf type {@code GetTokenRequestProto} */ public static final class GetTokenRequestProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:GetTokenRequestProto) - GetTokenRequestProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements GetTokenRequestProtoOrBuilder { // Use GetTokenRequestProto.newBuilder() to construct. - private GetTokenRequestProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private GetTokenRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private GetTokenRequestProto() { - appId_ = ""; + private GetTokenRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetTokenRequestProto defaultInstance; + public static GetTokenRequestProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new GetTokenRequestProto(); + public GetTokenRequestProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private GetTokenRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -20579,28 +18031,25 @@ private GetTokenRequestProto( case 0: done = true; break; - case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); - bitField0_ |= 0x00000001; - appId_ = bs; - break; - } default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { done = true; } break; } + case 10: { + bitField0_ |= 0x00000001; + appId_ = input.readBytes(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ 
-20611,30 +18060,41 @@ private GetTokenRequestProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetTokenRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetTokenRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetTokenRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetTokenRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional string app_id = 1; public static final int APP_ID_FIELD_NUMBER = 1; - private volatile java.lang.Object appId_; + private java.lang.Object appId_; /** * optional string app_id = 1; - * @return Whether the appId field is set. */ - @java.lang.Override public boolean hasAppId() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string app_id = 1; - * @return The appId. */ - @java.lang.Override public java.lang.String getAppId() { java.lang.Object ref = appId_; if (ref instanceof java.lang.String) { @@ -20651,9 +18111,7 @@ public java.lang.String getAppId() { } /** * optional string app_id = 1; - * @return The bytes for appId. 
*/ - @java.lang.Override public com.google.protobuf.ByteString getAppIdBytes() { java.lang.Object ref = appId_; @@ -20668,40 +18126,49 @@ public java.lang.String getAppId() { } } + private void initFields() { + appId_ = ""; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, appId_); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getAppIdBytes()); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, appId_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getAppIdBytes()); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -20712,42 +18179,34 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto) obj; - if (hasAppId() != other.hasAppId()) return false; + boolean result = true; + result = result && (hasAppId() == other.hasAppId()); if (hasAppId()) { - if (!getAppId() - .equals(other.getAppId())) return false; + result = result && getAppId() + .equals(other.getAppId()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasAppId()) { hash = (37 * hash) + APP_ID_FIELD_NUMBER; hash = (53 * hash) + getAppId().hashCode(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - 
throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -20771,59 +18230,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Ge } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -20831,16 +18277,14 @@ protected Builder newBuilderForType( * Protobuf type {@code GetTokenRequestProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:GetTokenRequestProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetTokenRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetTokenRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -20853,16 +18297,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); appId_ = ""; @@ -20870,18 +18316,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetTokenRequestProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto result = buildPartial(); if (!result.isInitialized()) { @@ -20890,12 +18337,11 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenR return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 
0x00000001)) { to_bitField0_ |= 0x00000001; } result.appId_ = appId_; @@ -20904,39 +18350,6 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenR return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto)other); @@ -20953,17 +18366,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc appId_ = other.appId_; onChanged(); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -20973,7 +18383,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -20983,27 +18393,23 @@ public Builder mergeFrom( } private int bitField0_; + // optional string app_id = 1; private java.lang.Object appId_ = ""; /** * optional string app_id = 1; - * @return Whether the appId field is set. */ public boolean hasAppId() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string app_id = 1; - * @return The appId. */ public java.lang.String getAppId() { java.lang.Object ref = appId_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - appId_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + appId_ = s; return s; } else { return (java.lang.String) ref; @@ -21011,7 +18417,6 @@ public java.lang.String getAppId() { } /** * optional string app_id = 1; - * @return The bytes for appId. */ public com.google.protobuf.ByteString getAppIdBytes() { @@ -21028,8 +18433,6 @@ public java.lang.String getAppId() { } /** * optional string app_id = 1; - * @param value The appId to set. 
- * @return This builder for chaining. */ public Builder setAppId( java.lang.String value) { @@ -21043,7 +18446,6 @@ public Builder setAppId( } /** * optional string app_id = 1; - * @return This builder for chaining. */ public Builder clearAppId() { bitField0_ = (bitField0_ & ~0x00000001); @@ -21053,8 +18455,6 @@ public Builder clearAppId() { } /** * optional string app_id = 1; - * @param value The bytes for appId to set. - * @return This builder for chaining. */ public Builder setAppIdBytes( com.google.protobuf.ByteString value) { @@ -21066,71 +18466,28 @@ public Builder setAppIdBytes( onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:GetTokenRequestProto) } - // @@protoc_insertion_point(class_scope:GetTokenRequestProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public GetTokenRequestProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new GetTokenRequestProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenRequestProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new GetTokenRequestProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:GetTokenRequestProto) } - public interface GetTokenResponseProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:GetTokenResponseProto) - com.google.protobuf.MessageOrBuilder { + public interface GetTokenResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional bytes token = 1; /** * optional bytes token = 1; - * @return Whether the token field is set. */ boolean hasToken(); /** * optional bytes token = 1; - * @return The token. */ com.google.protobuf.ByteString getToken(); } @@ -21138,38 +18495,35 @@ public interface GetTokenResponseProtoOrBuilder extends * Protobuf type {@code GetTokenResponseProto} */ public static final class GetTokenResponseProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:GetTokenResponseProto) - GetTokenResponseProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements GetTokenResponseProtoOrBuilder { // Use GetTokenResponseProto.newBuilder() to construct. 
- private GetTokenResponseProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private GetTokenResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private GetTokenResponseProto() { - token_ = com.google.protobuf.ByteString.EMPTY; + private GetTokenResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetTokenResponseProto defaultInstance; + public static GetTokenResponseProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new GetTokenResponseProto(); + public GetTokenResponseProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private GetTokenResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -21181,27 +18535,25 @@ private GetTokenResponseProto( case 0: done = true; break; - case 10: { - bitField0_ |= 0x00000001; - token_ = input.readBytes(); - break; - } default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { done = true; } break; } + case 10: { + bitField0_ |= 0x00000001; + token_ = input.readBytes(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -21212,69 +18564,88 @@ private GetTokenResponseProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetTokenResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetTokenResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetTokenResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new 
GetTokenResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional bytes token = 1; public static final int TOKEN_FIELD_NUMBER = 1; private com.google.protobuf.ByteString token_; /** * optional bytes token = 1; - * @return Whether the token field is set. */ - @java.lang.Override public boolean hasToken() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional bytes token = 1; - * @return The token. */ - @java.lang.Override public com.google.protobuf.ByteString getToken() { return token_; } + private void initFields() { + token_ = com.google.protobuf.ByteString.EMPTY; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, token_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, token_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -21285,42 +18656,34 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto) obj; - if (hasToken() != other.hasToken()) return false; + boolean result = true; + result = result && (hasToken() == other.hasToken()); if (hasToken()) { - if (!getToken() - .equals(other.getToken())) return false; + result = result && getToken() + .equals(other.getToken()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasToken()) { hash = (37 * hash) + TOKEN_FIELD_NUMBER; hash = (53 * hash) + getToken().hashCode(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -21344,59 +18707,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Ge } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -21404,16 +18754,14 @@ protected Builder newBuilderForType( * Protobuf type {@code GetTokenResponseProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:GetTokenResponseProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetTokenResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetTokenResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -21426,16 +18774,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); token_ = com.google.protobuf.ByteString.EMPTY; @@ -21443,18 +18793,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetTokenResponseProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto result = buildPartial(); if (!result.isInitialized()) { @@ -21463,12 +18814,11 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenR return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) 
{ + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.token_ = token_; @@ -21477,39 +18827,6 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenR return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto)other); @@ -21524,17 +18841,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasToken()) { setToken(other.getToken()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -21544,7 +18858,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -21554,27 +18868,22 @@ public Builder mergeFrom( } private int bitField0_; + // optional bytes token = 1; private com.google.protobuf.ByteString token_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes token = 1; - * @return Whether the token field is set. */ - @java.lang.Override public boolean hasToken() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional bytes token = 1; - * @return The token. */ - @java.lang.Override public com.google.protobuf.ByteString getToken() { return token_; } /** * optional bytes token = 1; - * @param value The token to set. - * @return This builder for chaining. */ public Builder setToken(com.google.protobuf.ByteString value) { if (value == null) { @@ -21587,7 +18896,6 @@ public Builder setToken(com.google.protobuf.ByteString value) { } /** * optional bytes token = 1; - * @return This builder for chaining. 
*/ public Builder clearToken() { bitField0_ = (bitField0_ & ~0x00000001); @@ -21595,132 +18903,83 @@ public Builder clearToken() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:GetTokenResponseProto) } - // @@protoc_insertion_point(class_scope:GetTokenResponseProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public GetTokenResponseProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new GetTokenResponseProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetTokenResponseProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new GetTokenResponseProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:GetTokenResponseProto) } - public interface LlapOutputSocketInitMessageOrBuilder extends - // @@protoc_insertion_point(interface_extends:LlapOutputSocketInitMessage) - com.google.protobuf.MessageOrBuilder { + public interface LlapOutputSocketInitMessageOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // required string fragment_id = 1; /** * required string fragment_id = 1; - * @return Whether the fragmentId field is set. */ boolean hasFragmentId(); /** * required string fragment_id = 1; - * @return The fragmentId. */ java.lang.String getFragmentId(); /** * required string fragment_id = 1; - * @return The bytes for fragmentId. */ com.google.protobuf.ByteString getFragmentIdBytes(); + // optional bytes token = 2; /** * optional bytes token = 2; - * @return Whether the token field is set. */ boolean hasToken(); /** * optional bytes token = 2; - * @return The token. */ com.google.protobuf.ByteString getToken(); } /** + * Protobuf type {@code LlapOutputSocketInitMessage} + * *
    * The message sent by the external client to claim the output from the output socket.
    * 
- * - * Protobuf type {@code LlapOutputSocketInitMessage} */ public static final class LlapOutputSocketInitMessage extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:LlapOutputSocketInitMessage) - LlapOutputSocketInitMessageOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements LlapOutputSocketInitMessageOrBuilder { // Use LlapOutputSocketInitMessage.newBuilder() to construct. - private LlapOutputSocketInitMessage(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private LlapOutputSocketInitMessage(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private LlapOutputSocketInitMessage() { - fragmentId_ = ""; - token_ = com.google.protobuf.ByteString.EMPTY; + private LlapOutputSocketInitMessage(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final LlapOutputSocketInitMessage defaultInstance; + public static LlapOutputSocketInitMessage getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new LlapOutputSocketInitMessage(); + public LlapOutputSocketInitMessage getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private LlapOutputSocketInitMessage( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -21732,10 +18991,16 @@ private LlapOutputSocketInitMessage( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - fragmentId_ = bs; + fragmentId_ = input.readBytes(); break; } case 18: { @@ -21743,22 +19008,13 @@ private LlapOutputSocketInitMessage( token_ = input.readBytes(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -21769,30 +19025,41 @@ private LlapOutputSocketInitMessage( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_LlapOutputSocketInitMessage_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_LlapOutputSocketInitMessage_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public LlapOutputSocketInitMessage parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new LlapOutputSocketInitMessage(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // required string fragment_id = 1; public static final int FRAGMENT_ID_FIELD_NUMBER = 1; - private volatile java.lang.Object fragmentId_; + private java.lang.Object fragmentId_; /** * required string fragment_id = 1; - * @return Whether the fragmentId field is set. */ - @java.lang.Override public boolean hasFragmentId() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string fragment_id = 1; - * @return The fragmentId. */ - @java.lang.Override public java.lang.String getFragmentId() { java.lang.Object ref = fragmentId_; if (ref instanceof java.lang.String) { @@ -21809,9 +19076,7 @@ public java.lang.String getFragmentId() { } /** * required string fragment_id = 1; - * @return The bytes for fragmentId. */ - @java.lang.Override public com.google.protobuf.ByteString getFragmentIdBytes() { java.lang.Object ref = fragmentId_; @@ -21826,31 +19091,30 @@ public java.lang.String getFragmentId() { } } + // optional bytes token = 2; public static final int TOKEN_FIELD_NUMBER = 2; private com.google.protobuf.ByteString token_; /** * optional bytes token = 2; - * @return Whether the token field is set. */ - @java.lang.Override public boolean hasToken() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bytes token = 2; - * @return The token. 
*/ - @java.lang.Override public com.google.protobuf.ByteString getToken() { return token_; } + private void initFields() { + fragmentId_ = ""; + token_ = com.google.protobuf.ByteString.EMPTY; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; if (!hasFragmentId()) { memoizedIsInitialized = 0; @@ -21860,36 +19124,44 @@ public final boolean isInitialized() { return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, fragmentId_); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getFragmentIdBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeBytes(2, token_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, fragmentId_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getFragmentIdBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(2, token_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -21900,27 +19172,30 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage) obj; - if (hasFragmentId() != other.hasFragmentId()) return false; + boolean result = true; + result = result && (hasFragmentId() == other.hasFragmentId()); if (hasFragmentId()) { - if (!getFragmentId() - .equals(other.getFragmentId())) return false; + result = result && getFragmentId() + .equals(other.getFragmentId()); } - if (hasToken() != other.hasToken()) return false; + result = result && (hasToken() == other.hasToken()); if (hasToken()) { - if (!getToken() - .equals(other.getToken())) return false; + result = result && getToken() + .equals(other.getToken()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if 
(hasFragmentId()) { hash = (37 * hash) + FRAGMENT_ID_FIELD_NUMBER; hash = (53 * hash) + getFragmentId().hashCode(); @@ -21929,22 +19204,11 @@ public int hashCode() { hash = (37 * hash) + TOKEN_FIELD_NUMBER; hash = (53 * hash) + getToken().hashCode(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -21968,80 +19232,65 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Ll } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public 
Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** + * Protobuf type {@code LlapOutputSocketInitMessage} + * *
      * The message sent by the external client to claim the output from the output socket.
      * 
- * - * Protobuf type {@code LlapOutputSocketInitMessage} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:LlapOutputSocketInitMessage) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessageOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessageOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_LlapOutputSocketInitMessage_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_LlapOutputSocketInitMessage_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -22054,16 +19303,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); fragmentId_ = ""; @@ -22073,18 +19324,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_LlapOutputSocketInitMessage_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage result = buildPartial(); if (!result.isInitialized()) { @@ -22093,16 +19345,15 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutpu return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.fragmentId_ = fragmentId_; - if (((from_bitField0_ & 0x00000002) != 0)) { + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.token_ = token_; @@ -22111,39 +19362,6 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutpu return 
result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage)other); @@ -22163,20 +19381,18 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasToken()) { setToken(other.getToken()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { if (!hasFragmentId()) { + return false; } return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -22186,7 +19402,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -22196,27 +19412,23 @@ public Builder mergeFrom( } private int bitField0_; + // required string fragment_id = 1; private java.lang.Object fragmentId_ = ""; /** * required string fragment_id = 1; - * @return Whether the fragmentId field is set. */ public boolean hasFragmentId() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string fragment_id = 1; - * @return The fragmentId. */ public java.lang.String getFragmentId() { java.lang.Object ref = fragmentId_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - fragmentId_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + fragmentId_ = s; return s; } else { return (java.lang.String) ref; @@ -22224,7 +19436,6 @@ public java.lang.String getFragmentId() { } /** * required string fragment_id = 1; - * @return The bytes for fragmentId. */ public com.google.protobuf.ByteString getFragmentIdBytes() { @@ -22241,8 +19452,6 @@ public java.lang.String getFragmentId() { } /** * required string fragment_id = 1; - * @param value The fragmentId to set. 
- * @return This builder for chaining. */ public Builder setFragmentId( java.lang.String value) { @@ -22256,7 +19465,6 @@ public Builder setFragmentId( } /** * required string fragment_id = 1; - * @return This builder for chaining. */ public Builder clearFragmentId() { bitField0_ = (bitField0_ & ~0x00000001); @@ -22266,8 +19474,6 @@ public Builder clearFragmentId() { } /** * required string fragment_id = 1; - * @param value The bytes for fragmentId to set. - * @return This builder for chaining. */ public Builder setFragmentIdBytes( com.google.protobuf.ByteString value) { @@ -22280,27 +19486,22 @@ public Builder setFragmentIdBytes( return this; } + // optional bytes token = 2; private com.google.protobuf.ByteString token_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes token = 2; - * @return Whether the token field is set. */ - @java.lang.Override public boolean hasToken() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional bytes token = 2; - * @return The token. */ - @java.lang.Override public com.google.protobuf.ByteString getToken() { return token_; } /** * optional bytes token = 2; - * @param value The token to set. - * @return This builder for chaining. */ public Builder setToken(com.google.protobuf.ByteString value) { if (value == null) { @@ -22313,7 +19514,6 @@ public Builder setToken(com.google.protobuf.ByteString value) { } /** * optional bytes token = 2; - * @return This builder for chaining. */ public Builder clearToken() { bitField0_ = (bitField0_ & ~0x00000002); @@ -22321,98 +19521,54 @@ public Builder clearToken() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:LlapOutputSocketInitMessage) } - // @@protoc_insertion_point(class_scope:LlapOutputSocketInitMessage) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public LlapOutputSocketInitMessage parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new LlapOutputSocketInitMessage(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.LlapOutputSocketInitMessage getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new LlapOutputSocketInitMessage(true); + defaultInstance.initFields(); } + // 
@@protoc_insertion_point(class_scope:LlapOutputSocketInitMessage) } - public interface PurgeCacheRequestProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:PurgeCacheRequestProto) - com.google.protobuf.MessageOrBuilder { + public interface PurgeCacheRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code PurgeCacheRequestProto} */ public static final class PurgeCacheRequestProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:PurgeCacheRequestProto) - PurgeCacheRequestProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements PurgeCacheRequestProtoOrBuilder { // Use PurgeCacheRequestProto.newBuilder() to construct. - private PurgeCacheRequestProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private PurgeCacheRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private PurgeCacheRequestProto() { + private PurgeCacheRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final PurgeCacheRequestProto defaultInstance; + public static PurgeCacheRequestProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new PurgeCacheRequestProto(); + public PurgeCacheRequestProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private PurgeCacheRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -22424,8 +19580,8 @@ private PurgeCacheRequestProto( done = true; break; default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { done = true; } break; @@ -22434,11 +19590,9 @@ private PurgeCacheRequestProto( } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -22449,42 +19603,63 @@ private PurgeCacheRequestProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.Builder.class); } - private byte memoizedIsInitialized = -1; + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public PurgeCacheRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PurgeCacheRequestProto(input, extensionRegistry); + } + }; + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - unknownFields.writeTo(output); + getSerializedSize(); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -22495,33 +19670,25 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto) obj; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (29 * hash) + unknownFields.hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -22545,59 +19712,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Pu } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -22605,16 +19759,14 @@ protected Builder newBuilderForType( * Protobuf type {@code PurgeCacheRequestProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:PurgeCacheRequestProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -22627,33 +19779,36 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheRequestProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto result = buildPartial(); if (!result.isInitialized()) { @@ -22662,46 +19817,12 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCach return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto(this); onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - 
com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto)other); @@ -22713,17 +19834,14 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto other) { if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -22733,7 +19851,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -22741,71 +19859,28 @@ public Builder mergeFrom( } return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:PurgeCacheRequestProto) } - // @@protoc_insertion_point(class_scope:PurgeCacheRequestProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public PurgeCacheRequestProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return new PurgeCacheRequestProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheRequestProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new PurgeCacheRequestProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:PurgeCacheRequestProto) } - public interface PurgeCacheResponseProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:PurgeCacheResponseProto) - com.google.protobuf.MessageOrBuilder { + public interface PurgeCacheResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional int64 purged_memory_bytes = 1; /** * optional int64 purged_memory_bytes = 1; - * @return Whether the purgedMemoryBytes field is set. */ boolean hasPurgedMemoryBytes(); /** * optional int64 purged_memory_bytes = 1; - * @return The purgedMemoryBytes. */ long getPurgedMemoryBytes(); } @@ -22813,37 +19888,35 @@ public interface PurgeCacheResponseProtoOrBuilder extends * Protobuf type {@code PurgeCacheResponseProto} */ public static final class PurgeCacheResponseProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:PurgeCacheResponseProto) - PurgeCacheResponseProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements PurgeCacheResponseProtoOrBuilder { // Use PurgeCacheResponseProto.newBuilder() to construct. - private PurgeCacheResponseProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private PurgeCacheResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private PurgeCacheResponseProto() { + private PurgeCacheResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final PurgeCacheResponseProto defaultInstance; + public static PurgeCacheResponseProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new PurgeCacheResponseProto(); + public PurgeCacheResponseProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private PurgeCacheResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -22855,27 +19928,25 @@ private PurgeCacheResponseProto( case 0: done = true; break; - case 8: { - bitField0_ |= 0x00000001; - purgedMemoryBytes_ = input.readInt64(); - break; - } default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { + if 
(!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { done = true; } break; } + case 8: { + bitField0_ |= 0x00000001; + purgedMemoryBytes_ = input.readInt64(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -22886,69 +19957,88 @@ private PurgeCacheResponseProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public PurgeCacheResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new PurgeCacheResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional int64 purged_memory_bytes = 1; public static final int PURGED_MEMORY_BYTES_FIELD_NUMBER = 1; private long purgedMemoryBytes_; /** * optional int64 purged_memory_bytes = 1; - * @return Whether the purgedMemoryBytes field is set. */ - @java.lang.Override public boolean hasPurgedMemoryBytes() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional int64 purged_memory_bytes = 1; - * @return The purgedMemoryBytes. 
*/ - @java.lang.Override public long getPurgedMemoryBytes() { return purgedMemoryBytes_; } + private void initFields() { + purgedMemoryBytes_ = 0L; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, purgedMemoryBytes_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(1, purgedMemoryBytes_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -22959,43 +20049,34 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto) obj; - if (hasPurgedMemoryBytes() != other.hasPurgedMemoryBytes()) return false; + boolean result = true; + result = result && (hasPurgedMemoryBytes() == other.hasPurgedMemoryBytes()); if (hasPurgedMemoryBytes()) { - if (getPurgedMemoryBytes() - != other.getPurgedMemoryBytes()) return false; + result = result && (getPurgedMemoryBytes() + == other.getPurgedMemoryBytes()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasPurgedMemoryBytes()) { hash = (37 * hash) + PURGED_MEMORY_BYTES_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getPurgedMemoryBytes()); + hash = (53 * hash) + hashLong(getPurgedMemoryBytes()); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -23019,59 +20100,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Pu } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -23079,16 +20147,14 @@ protected Builder newBuilderForType( * Protobuf type {@code PurgeCacheResponseProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:PurgeCacheResponseProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -23101,16 +20167,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); purgedMemoryBytes_ = 0L; @@ -23118,18 +20186,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_PurgeCacheResponseProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto result = buildPartial(); if (!result.isInitialized()) { @@ -23138,53 +20207,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCach return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) 
!= 0)) { - result.purgedMemoryBytes_ = purgedMemoryBytes_; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } + result.purgedMemoryBytes_ = purgedMemoryBytes_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto)other); @@ -23199,17 +20234,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasPurgedMemoryBytes()) { setPurgedMemoryBytes(other.getPurgedMemoryBytes()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -23219,7 +20251,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -23229,27 +20261,22 @@ public Builder mergeFrom( } private int bitField0_; + // optional int64 purged_memory_bytes = 1; private long purgedMemoryBytes_ ; /** * optional int64 purged_memory_bytes = 1; - * @return Whether the purgedMemoryBytes field is set. */ - @java.lang.Override public boolean hasPurgedMemoryBytes() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional int64 purged_memory_bytes = 1; - * @return The purgedMemoryBytes. */ - @java.lang.Override public long getPurgedMemoryBytes() { return purgedMemoryBytes_; } /** * optional int64 purged_memory_bytes = 1; - * @param value The purgedMemoryBytes to set. - * @return This builder for chaining. */ public Builder setPurgedMemoryBytes(long value) { bitField0_ |= 0x00000001; @@ -23259,7 +20286,6 @@ public Builder setPurgedMemoryBytes(long value) { } /** * optional int64 purged_memory_bytes = 1; - * @return This builder for chaining. 
*/ public Builder clearPurgedMemoryBytes() { bitField0_ = (bitField0_ & ~0x00000001); @@ -23267,88 +20293,43 @@ public Builder clearPurgedMemoryBytes() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:PurgeCacheResponseProto) } - // @@protoc_insertion_point(class_scope:PurgeCacheResponseProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public PurgeCacheResponseProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new PurgeCacheResponseProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.PurgeCacheResponseProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new PurgeCacheResponseProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:PurgeCacheResponseProto) } - public interface MapEntryOrBuilder extends - // @@protoc_insertion_point(interface_extends:MapEntry) - com.google.protobuf.MessageOrBuilder { + public interface MapEntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional string key = 1; /** * optional string key = 1; - * @return Whether the key field is set. */ boolean hasKey(); /** * optional string key = 1; - * @return The key. */ java.lang.String getKey(); /** * optional string key = 1; - * @return The bytes for key. */ com.google.protobuf.ByteString getKeyBytes(); + // optional int64 value = 2; /** * optional int64 value = 2; - * @return Whether the value field is set. */ boolean hasValue(); /** * optional int64 value = 2; - * @return The value. */ long getValue(); } @@ -23356,38 +20337,35 @@ public interface MapEntryOrBuilder extends * Protobuf type {@code MapEntry} */ public static final class MapEntry extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:MapEntry) - MapEntryOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements MapEntryOrBuilder { // Use MapEntry.newBuilder() to construct. 
- private MapEntry(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private MapEntry(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private MapEntry() { - key_ = ""; + private MapEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MapEntry defaultInstance; + public static MapEntry getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new MapEntry(); + public MapEntry getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private MapEntry( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -23399,10 +20377,16 @@ private MapEntry( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - key_ = bs; + key_ = input.readBytes(); break; } case 16: { @@ -23410,22 +20394,13 @@ private MapEntry( value_ = input.readInt64(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -23436,30 +20411,41 @@ private MapEntry( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_MapEntry_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_MapEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MapEntry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MapEntry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return 
PARSER; + } + private int bitField0_; + // optional string key = 1; public static final int KEY_FIELD_NUMBER = 1; - private volatile java.lang.Object key_; + private java.lang.Object key_; /** * optional string key = 1; - * @return Whether the key field is set. */ - @java.lang.Override public boolean hasKey() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string key = 1; - * @return The key. */ - @java.lang.Override public java.lang.String getKey() { java.lang.Object ref = key_; if (ref instanceof java.lang.String) { @@ -23476,9 +20462,7 @@ public java.lang.String getKey() { } /** * optional string key = 1; - * @return The bytes for key. */ - @java.lang.Override public com.google.protobuf.ByteString getKeyBytes() { java.lang.Object ref = key_; @@ -23493,66 +20477,73 @@ public java.lang.String getKey() { } } + // optional int64 value = 2; public static final int VALUE_FIELD_NUMBER = 2; private long value_; /** * optional int64 value = 2; - * @return Whether the value field is set. */ - @java.lang.Override public boolean hasValue() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional int64 value = 2; - * @return The value. */ - @java.lang.Override public long getValue() { return value_; } + private void initFields() { + key_ = ""; + value_ = 0L; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, key_); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getKeyBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeInt64(2, value_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, key_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getKeyBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(2, value_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -23563,52 +20554,43 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry other = 
(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry) obj; - if (hasKey() != other.hasKey()) return false; + boolean result = true; + result = result && (hasKey() == other.hasKey()); if (hasKey()) { - if (!getKey() - .equals(other.getKey())) return false; + result = result && getKey() + .equals(other.getKey()); } - if (hasValue() != other.hasValue()) return false; + result = result && (hasValue() == other.hasValue()); if (hasValue()) { - if (getValue() - != other.getValue()) return false; + result = result && (getValue() + == other.getValue()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasKey()) { hash = (37 * hash) + KEY_FIELD_NUMBER; hash = (53 * hash) + getKey().hashCode(); } if (hasValue()) { hash = (37 * hash) + VALUE_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getValue()); + hash = (53 * hash) + hashLong(getValue()); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -23632,59 +20614,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Ma } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, 
extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -23692,16 +20661,14 @@ protected Builder newBuilderForType( * Protobuf type {@code MapEntry} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:MapEntry) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_MapEntry_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_MapEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -23714,16 +20681,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); key_ = ""; @@ -23733,18 +20702,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_MapEntry_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry result = buildPartial(); if (!result.isInitialized()) { @@ -23753,57 +20723,23 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.key_ = key_; - if (((from_bitField0_ & 0x00000002) != 0)) { - result.value_ = value_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } + result.value_ = value_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry)other); @@ -23823,17 +20759,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasValue()) { setValue(other.getValue()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -23843,7 +20776,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally 
{ if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -23853,27 +20786,23 @@ public Builder mergeFrom( } private int bitField0_; + // optional string key = 1; private java.lang.Object key_ = ""; /** * optional string key = 1; - * @return Whether the key field is set. */ public boolean hasKey() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string key = 1; - * @return The key. */ public java.lang.String getKey() { java.lang.Object ref = key_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - key_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + key_ = s; return s; } else { return (java.lang.String) ref; @@ -23881,7 +20810,6 @@ public java.lang.String getKey() { } /** * optional string key = 1; - * @return The bytes for key. */ public com.google.protobuf.ByteString getKeyBytes() { @@ -23898,8 +20826,6 @@ public java.lang.String getKey() { } /** * optional string key = 1; - * @param value The key to set. - * @return This builder for chaining. */ public Builder setKey( java.lang.String value) { @@ -23913,7 +20839,6 @@ public Builder setKey( } /** * optional string key = 1; - * @return This builder for chaining. */ public Builder clearKey() { bitField0_ = (bitField0_ & ~0x00000001); @@ -23923,8 +20848,6 @@ public Builder clearKey() { } /** * optional string key = 1; - * @param value The bytes for key to set. - * @return This builder for chaining. */ public Builder setKeyBytes( com.google.protobuf.ByteString value) { @@ -23937,27 +20860,22 @@ public Builder setKeyBytes( return this; } + // optional int64 value = 2; private long value_ ; /** * optional int64 value = 2; - * @return Whether the value field is set. */ - @java.lang.Override public boolean hasValue() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional int64 value = 2; - * @return The value. */ - @java.lang.Override public long getValue() { return value_; } /** * optional int64 value = 2; - * @param value The value to set. - * @return This builder for chaining. */ public Builder setValue(long value) { bitField0_ |= 0x00000002; @@ -23967,7 +20885,6 @@ public Builder setValue(long value) { } /** * optional int64 value = 2; - * @return This builder for chaining. 
*/ public Builder clearValue() { bitField0_ = (bitField0_ & ~0x00000002); @@ -23975,98 +20892,54 @@ public Builder clearValue() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:MapEntry) } - // @@protoc_insertion_point(class_scope:MapEntry) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public MapEntry parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new MapEntry(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new MapEntry(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:MapEntry) } - public interface GetDaemonMetricsRequestProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:GetDaemonMetricsRequestProto) - com.google.protobuf.MessageOrBuilder { + public interface GetDaemonMetricsRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code GetDaemonMetricsRequestProto} */ public static final class GetDaemonMetricsRequestProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:GetDaemonMetricsRequestProto) - GetDaemonMetricsRequestProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements GetDaemonMetricsRequestProtoOrBuilder { // Use GetDaemonMetricsRequestProto.newBuilder() to construct. 
- private GetDaemonMetricsRequestProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private GetDaemonMetricsRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private GetDaemonMetricsRequestProto() { + private GetDaemonMetricsRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetDaemonMetricsRequestProto defaultInstance; + public static GetDaemonMetricsRequestProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new GetDaemonMetricsRequestProto(); + public GetDaemonMetricsRequestProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private GetDaemonMetricsRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -24078,8 +20951,8 @@ private GetDaemonMetricsRequestProto( done = true; break; default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { done = true; } break; @@ -24088,11 +20961,9 @@ private GetDaemonMetricsRequestProto( } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -24103,42 +20974,63 @@ private GetDaemonMetricsRequestProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.Builder.class); } - private byte memoizedIsInitialized = -1; + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetDaemonMetricsRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetDaemonMetricsRequestProto(input, 
extensionRegistry); + } + }; + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - unknownFields.writeTo(output); + getSerializedSize(); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -24149,33 +21041,25 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto) obj; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (29 * hash) + unknownFields.hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -24199,59 +21083,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Ge } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseFrom( java.io.InputStream 
input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -24259,16 +21130,14 @@ protected Builder newBuilderForType( * Protobuf type {@code GetDaemonMetricsRequestProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:GetDaemonMetricsRequestProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -24281,33 +21150,36 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsRequestProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto result = buildPartial(); if (!result.isInitialized()) { @@ -24316,46 +21188,12 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemon return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto(this); onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - 
} - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto)other); @@ -24367,17 +21205,14 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto other) { if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -24387,7 +21222,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -24395,63 +21230,22 @@ public Builder mergeFrom( } return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:GetDaemonMetricsRequestProto) } - // @@protoc_insertion_point(class_scope:GetDaemonMetricsRequestProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public GetDaemonMetricsRequestProto 
parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new GetDaemonMetricsRequestProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new GetDaemonMetricsRequestProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:GetDaemonMetricsRequestProto) } - public interface GetDaemonMetricsResponseProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:GetDaemonMetricsResponseProto) - com.google.protobuf.MessageOrBuilder { + public interface GetDaemonMetricsResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // repeated .MapEntry metrics = 1; /** * repeated .MapEntry metrics = 1; */ @@ -24480,38 +21274,35 @@ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilde * Protobuf type {@code GetDaemonMetricsResponseProto} */ public static final class GetDaemonMetricsResponseProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:GetDaemonMetricsResponseProto) - GetDaemonMetricsResponseProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements GetDaemonMetricsResponseProtoOrBuilder { // Use GetDaemonMetricsResponseProto.newBuilder() to construct. 
- private GetDaemonMetricsResponseProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private GetDaemonMetricsResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private GetDaemonMetricsResponseProto() { - metrics_ = java.util.Collections.emptyList(); + private GetDaemonMetricsResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetDaemonMetricsResponseProto defaultInstance; + public static GetDaemonMetricsResponseProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new GetDaemonMetricsResponseProto(); + public GetDaemonMetricsResponseProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private GetDaemonMetricsResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -24523,33 +21314,30 @@ private GetDaemonMetricsResponseProto( case 0: done = true; break; - case 10: { - if (!((mutable_bitField0_ & 0x00000001) != 0)) { - metrics_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; } - metrics_.add( - input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.PARSER, extensionRegistry)); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + metrics_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; } + metrics_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.PARSER, extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000001) != 0)) { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { metrics_ = java.util.Collections.unmodifiableList(metrics_); } this.unknownFields = unknownFields.build(); @@ -24561,27 +21349,40 @@ private GetDaemonMetricsResponseProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetDaemonMetricsResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetDaemonMetricsResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .MapEntry metrics = 1; public static final int METRICS_FIELD_NUMBER = 1; private java.util.List metrics_; /** * repeated .MapEntry metrics = 1; */ - @java.lang.Override public java.util.List getMetricsList() { return metrics_; } /** * repeated .MapEntry metrics = 1; */ - @java.lang.Override public java.util.List getMetricsOrBuilderList() { return metrics_; @@ -24589,49 +21390,47 @@ public java.util.Listrepeated .MapEntry metrics = 1; */ - @java.lang.Override public int getMetricsCount() { return metrics_.size(); } /** * repeated .MapEntry metrics = 1; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry getMetrics(int index) { return metrics_.get(index); } /** * repeated .MapEntry metrics = 1; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder getMetricsOrBuilder( int index) { return metrics_.get(index); } + private void initFields() { + metrics_ = java.util.Collections.emptyList(); + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getSerializedSize(); for (int i = 0; i < metrics_.size(); i++) { output.writeMessage(1, metrics_.get(i)); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; @@ -24639,11 +21438,18 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, metrics_.get(i)); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -24654,39 +21460,31 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto other = 
(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto) obj; - if (!getMetricsList() - .equals(other.getMetricsList())) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + boolean result = true; + result = result && getMetricsList() + .equals(other.getMetricsList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (getMetricsCount() > 0) { hash = (37 * hash) + METRICS_FIELD_NUMBER; hash = (53 * hash) + getMetricsList().hashCode(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -24710,59 +21508,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Ge } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return 
com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -24770,16 +21555,14 @@ protected Builder newBuilderForType( * Protobuf type {@code GetDaemonMetricsResponseProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:GetDaemonMetricsResponseProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -24792,17 +21575,19 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getMetricsFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); if (metricsBuilder_ == null) { @@ -24814,18 +21599,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetDaemonMetricsResponseProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto result = buildPartial(); if (!result.isInitialized()) { @@ -24834,12 +21620,11 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemon return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto(this); int from_bitField0_ = bitField0_; if (metricsBuilder_ == null) { - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { metrics_ = java.util.Collections.unmodifiableList(metrics_); bitField0_ = (bitField0_ & ~0x00000001); } @@ -24851,39 +21636,6 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemon return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto)other); @@ -24914,24 +21666,21 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc metrics_ = other.metrics_; bitField0_ = (bitField0_ & ~0x00000001); metricsBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
getMetricsFieldBuilder() : null; } else { metricsBuilder_.addAllMessages(other.metrics_); } } } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -24941,7 +21690,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -24951,16 +21700,17 @@ public Builder mergeFrom( } private int bitField0_; + // repeated .MapEntry metrics = 1; private java.util.List metrics_ = java.util.Collections.emptyList(); private void ensureMetricsIsMutable() { - if (!((bitField0_ & 0x00000001) != 0)) { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { metrics_ = new java.util.ArrayList(metrics_); bitField0_ |= 0x00000001; } } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder> metricsBuilder_; /** @@ -25092,8 +21842,7 @@ public Builder addAllMetrics( java.lang.Iterable values) { if (metricsBuilder_ == null) { ensureMetricsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, metrics_); + super.addAll(values, metrics_); onChanged(); } else { metricsBuilder_.addAllMessages(values); @@ -25176,96 +21925,52 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry. 
getMetricsBuilderList() { return getMetricsFieldBuilder().getBuilderList(); } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder> getMetricsFieldBuilder() { if (metricsBuilder_ == null) { - metricsBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + metricsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntry.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.MapEntryOrBuilder>( metrics_, - ((bitField0_ & 0x00000001) != 0), + ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); metrics_ = null; } return metricsBuilder_; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:GetDaemonMetricsResponseProto) } - // @@protoc_insertion_point(class_scope:GetDaemonMetricsResponseProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public GetDaemonMetricsResponseProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new GetDaemonMetricsResponseProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetDaemonMetricsResponseProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new GetDaemonMetricsResponseProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:GetDaemonMetricsResponseProto) } - public interface SetCapacityRequestProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:SetCapacityRequestProto) - com.google.protobuf.MessageOrBuilder { + public interface SetCapacityRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional int32 executorNum = 1; /** * optional int32 executorNum = 1; - * @return Whether the executorNum field is set. */ boolean hasExecutorNum(); /** * optional int32 executorNum = 1; - * @return The executorNum. 
*/ int getExecutorNum(); + // optional int32 queueSize = 2; /** * optional int32 queueSize = 2; - * @return Whether the queueSize field is set. */ boolean hasQueueSize(); /** * optional int32 queueSize = 2; - * @return The queueSize. */ int getQueueSize(); } @@ -25273,37 +21978,35 @@ public interface SetCapacityRequestProtoOrBuilder extends * Protobuf type {@code SetCapacityRequestProto} */ public static final class SetCapacityRequestProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:SetCapacityRequestProto) - SetCapacityRequestProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements SetCapacityRequestProtoOrBuilder { // Use SetCapacityRequestProto.newBuilder() to construct. - private SetCapacityRequestProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private SetCapacityRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private SetCapacityRequestProto() { + private SetCapacityRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SetCapacityRequestProto defaultInstance; + public static SetCapacityRequestProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new SetCapacityRequestProto(); + public SetCapacityRequestProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private SetCapacityRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -25315,6 +22018,13 @@ private SetCapacityRequestProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 8: { bitField0_ |= 0x00000001; executorNum_ = input.readInt32(); @@ -25325,22 +22035,13 @@ private SetCapacityRequestProto( queueSize_ = input.readInt32(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -25351,95 +22052,112 @@ private SetCapacityRequestProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityRequestProto_descriptor; } - @java.lang.Override - protected 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public SetCapacityRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SetCapacityRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional int32 executorNum = 1; public static final int EXECUTORNUM_FIELD_NUMBER = 1; private int executorNum_; /** * optional int32 executorNum = 1; - * @return Whether the executorNum field is set. */ - @java.lang.Override public boolean hasExecutorNum() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional int32 executorNum = 1; - * @return The executorNum. */ - @java.lang.Override public int getExecutorNum() { return executorNum_; } + // optional int32 queueSize = 2; public static final int QUEUESIZE_FIELD_NUMBER = 2; private int queueSize_; /** * optional int32 queueSize = 2; - * @return Whether the queueSize field is set. */ - @java.lang.Override public boolean hasQueueSize() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional int32 queueSize = 2; - * @return The queueSize. 
*/ - @java.lang.Override public int getQueueSize() { return queueSize_; } + private void initFields() { + executorNum_ = 0; + queueSize_ = 0; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt32(1, executorNum_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeInt32(2, queueSize_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(1, executorNum_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(2, queueSize_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -25450,27 +22168,30 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto) obj; - if (hasExecutorNum() != other.hasExecutorNum()) return false; + boolean result = true; + result = result && (hasExecutorNum() == other.hasExecutorNum()); if (hasExecutorNum()) { - if (getExecutorNum() - != other.getExecutorNum()) return false; + result = result && (getExecutorNum() + == other.getExecutorNum()); } - if (hasQueueSize() != other.hasQueueSize()) return false; + result = result && (hasQueueSize() == other.hasQueueSize()); if (hasQueueSize()) { - if (getQueueSize() - != other.getQueueSize()) return false; + result = result && (getQueueSize() + == other.getQueueSize()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasExecutorNum()) { hash = (37 * hash) + EXECUTORNUM_FIELD_NUMBER; hash = (53 * hash) + getExecutorNum(); @@ -25479,22 +22200,11 @@ public int hashCode() { hash = (37 * hash) + QUEUESIZE_FIELD_NUMBER; hash = (53 * hash) + getQueueSize(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) 
+ getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -25518,59 +22228,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Se } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - 
public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -25578,16 +22275,14 @@ protected Builder newBuilderForType( * Protobuf type {@code SetCapacityRequestProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:SetCapacityRequestProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -25600,16 +22295,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); executorNum_ = 0; @@ -25619,18 +22316,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityRequestProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto result = buildPartial(); if (!result.isInitialized()) { @@ -25639,57 +22337,23 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapaci return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto(this); int from_bitField0_ = bitField0_; 
int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { - result.executorNum_ = executorNum_; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (((from_bitField0_ & 0x00000002) != 0)) { - result.queueSize_ = queueSize_; + result.executorNum_ = executorNum_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } + result.queueSize_ = queueSize_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto)other); @@ -25707,17 +22371,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasQueueSize()) { setQueueSize(other.getQueueSize()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -25727,7 +22388,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -25737,27 +22398,22 @@ public Builder mergeFrom( } private int bitField0_; + // optional int32 executorNum = 1; private int executorNum_ ; /** * optional int32 executorNum = 1; - * @return Whether the executorNum field is set. */ - @java.lang.Override public boolean hasExecutorNum() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional int32 executorNum = 1; - * @return The executorNum. */ - @java.lang.Override public int getExecutorNum() { return executorNum_; } /** * optional int32 executorNum = 1; - * @param value The executorNum to set. - * @return This builder for chaining. 
*/ public Builder setExecutorNum(int value) { bitField0_ |= 0x00000001; @@ -25767,7 +22423,6 @@ public Builder setExecutorNum(int value) { } /** * optional int32 executorNum = 1; - * @return This builder for chaining. */ public Builder clearExecutorNum() { bitField0_ = (bitField0_ & ~0x00000001); @@ -25776,27 +22431,22 @@ public Builder clearExecutorNum() { return this; } + // optional int32 queueSize = 2; private int queueSize_ ; /** * optional int32 queueSize = 2; - * @return Whether the queueSize field is set. */ - @java.lang.Override public boolean hasQueueSize() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional int32 queueSize = 2; - * @return The queueSize. */ - @java.lang.Override public int getQueueSize() { return queueSize_; } /** * optional int32 queueSize = 2; - * @param value The queueSize to set. - * @return This builder for chaining. */ public Builder setQueueSize(int value) { bitField0_ |= 0x00000002; @@ -25806,7 +22456,6 @@ public Builder setQueueSize(int value) { } /** * optional int32 queueSize = 2; - * @return This builder for chaining. */ public Builder clearQueueSize() { bitField0_ = (bitField0_ & ~0x00000002); @@ -25814,98 +22463,54 @@ public Builder clearQueueSize() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:SetCapacityRequestProto) } - // @@protoc_insertion_point(class_scope:SetCapacityRequestProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public SetCapacityRequestProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SetCapacityRequestProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityRequestProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new SetCapacityRequestProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:SetCapacityRequestProto) } - public interface SetCapacityResponseProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:SetCapacityResponseProto) - com.google.protobuf.MessageOrBuilder { + public interface SetCapacityResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code SetCapacityResponseProto} */ public static final class 
SetCapacityResponseProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:SetCapacityResponseProto) - SetCapacityResponseProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements SetCapacityResponseProtoOrBuilder { // Use SetCapacityResponseProto.newBuilder() to construct. - private SetCapacityResponseProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private SetCapacityResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private SetCapacityResponseProto() { + private SetCapacityResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final SetCapacityResponseProto defaultInstance; + public static SetCapacityResponseProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new SetCapacityResponseProto(); + public SetCapacityResponseProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private SetCapacityResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -25917,8 +22522,8 @@ private SetCapacityResponseProto( done = true; break; default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { done = true; } break; @@ -25927,11 +22532,9 @@ private SetCapacityResponseProto( } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -25942,42 +22545,63 @@ private SetCapacityResponseProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.Builder.class); } - private byte memoizedIsInitialized = -1; + public static com.google.protobuf.Parser PARSER = + new 
com.google.protobuf.AbstractParser() { + public SetCapacityResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new SetCapacityResponseProto(input, extensionRegistry); + } + }; + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - unknownFields.writeTo(output); + getSerializedSize(); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -25988,33 +22612,25 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto) obj; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (29 * hash) + unknownFields.hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -26038,59 +22654,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Se } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseFrom(java.io.InputStream input) throws 
java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -26098,16 +22701,14 @@ protected Builder newBuilderForType( * Protobuf type {@code SetCapacityResponseProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:SetCapacityResponseProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -26120,33 +22721,36 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_SetCapacityResponseProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto result = buildPartial(); if (!result.isInitialized()) { @@ -26155,46 +22759,12 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapaci return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto(this); onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - 
com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto)other); @@ -26206,17 +22776,14 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto other) { if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -26226,7 +22793,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -26234,80 +22801,37 @@ public Builder mergeFrom( } return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:SetCapacityResponseProto) } - // @@protoc_insertion_point(class_scope:SetCapacityResponseProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public SetCapacityResponseProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new SetCapacityResponseProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.SetCapacityResponseProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new SetCapacityResponseProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:SetCapacityResponseProto) } - public interface EvictEntityRequestProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:EvictEntityRequestProto) - com.google.protobuf.MessageOrBuilder { + public interface EvictEntityRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // required string db_name = 1; /** * required string db_name = 1; - * @return Whether the dbName field is set. */ boolean hasDbName(); /** * required string db_name = 1; - * @return The dbName. */ java.lang.String getDbName(); /** * required string db_name = 1; - * @return The bytes for dbName. */ com.google.protobuf.ByteString getDbNameBytes(); + // repeated .TableProto table = 2; /** * repeated .TableProto table = 2; */ @@ -26333,46 +22857,42 @@ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuil int index); } /** + * Protobuf type {@code EvictEntityRequestProto} + * *
    * Used for proactive eviction requests. Must contain exactly one DB name and, optionally, table information.
    * 
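// Hedged usage sketch (annotation only, not part of the generated file): building a
// proactive-eviction request that targets a whole database. The builder methods used
// here (newBuilder, setDbName, build) are the ones generated for this message; the
// database name is an illustrative placeholder.
EvictEntityRequestProto wholeDbRequest = EvictEntityRequestProto.newBuilder()
    .setDbName("tpcds")   // required db_name; build() fails if it is missing
    .build();             // no TableProto entries, so presumably the whole DB is targeted (per the comment above)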
- * - * Protobuf type {@code EvictEntityRequestProto} */ public static final class EvictEntityRequestProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:EvictEntityRequestProto) - EvictEntityRequestProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements EvictEntityRequestProtoOrBuilder { // Use EvictEntityRequestProto.newBuilder() to construct. - private EvictEntityRequestProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private EvictEntityRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private EvictEntityRequestProto() { - dbName_ = ""; - table_ = java.util.Collections.emptyList(); + private EvictEntityRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final EvictEntityRequestProto defaultInstance; + public static EvictEntityRequestProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new EvictEntityRequestProto(); + public EvictEntityRequestProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private EvictEntityRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -26384,39 +22904,35 @@ private EvictEntityRequestProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - dbName_ = bs; + dbName_ = input.readBytes(); break; } case 18: { - if (!((mutable_bitField0_ & 0x00000002) != 0)) { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { table_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000002; } - table_.add( - input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.PARSER, extensionRegistry)); - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } + table_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.PARSER, extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000002) != 0)) { + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { table_ = 
java.util.Collections.unmodifiableList(table_); } this.unknownFields = unknownFields.build(); @@ -26428,30 +22944,41 @@ private EvictEntityRequestProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public EvictEntityRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new EvictEntityRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // required string db_name = 1; public static final int DB_NAME_FIELD_NUMBER = 1; - private volatile java.lang.Object dbName_; + private java.lang.Object dbName_; /** * required string db_name = 1; - * @return Whether the dbName field is set. */ - @java.lang.Override public boolean hasDbName() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string db_name = 1; - * @return The dbName. */ - @java.lang.Override public java.lang.String getDbName() { java.lang.Object ref = dbName_; if (ref instanceof java.lang.String) { @@ -26468,9 +22995,7 @@ public java.lang.String getDbName() { } /** * required string db_name = 1; - * @return The bytes for dbName. 
*/ - @java.lang.Override public com.google.protobuf.ByteString getDbNameBytes() { java.lang.Object ref = dbName_; @@ -26485,19 +23010,18 @@ public java.lang.String getDbName() { } } + // repeated .TableProto table = 2; public static final int TABLE_FIELD_NUMBER = 2; private java.util.List table_; /** * repeated .TableProto table = 2; */ - @java.lang.Override public java.util.List getTableList() { return table_; } /** * repeated .TableProto table = 2; */ - @java.lang.Override public java.util.List getTableOrBuilderList() { return table_; @@ -26505,32 +23029,31 @@ public java.util.Listrepeated .TableProto table = 2; */ - @java.lang.Override public int getTableCount() { return table_.size(); } /** * repeated .TableProto table = 2; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto getTable(int index) { return table_.get(index); } /** * repeated .TableProto table = 2; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder getTableOrBuilder( int index) { return table_.get(index); } + private void initFields() { + dbName_ = ""; + table_ = java.util.Collections.emptyList(); + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; if (!hasDbName()) { memoizedIsInitialized = 0; @@ -26546,36 +23069,44 @@ public final boolean isInitialized() { return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, dbName_); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getDbNameBytes()); } for (int i = 0; i < table_.size(); i++) { output.writeMessage(2, table_.get(i)); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, dbName_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getDbNameBytes()); } for (int i = 0; i < table_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(2, table_.get(i)); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -26586,24 +23117,27 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto) obj; - if (hasDbName() != other.hasDbName()) return false; + boolean result = true; + result = result && (hasDbName() == 
other.hasDbName()); if (hasDbName()) { - if (!getDbName() - .equals(other.getDbName())) return false; + result = result && getDbName() + .equals(other.getDbName()); } - if (!getTableList() - .equals(other.getTableList())) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && getTableList() + .equals(other.getTableList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasDbName()) { hash = (37 * hash) + DB_NAME_FIELD_NUMBER; hash = (53 * hash) + getDbName().hashCode(); @@ -26612,22 +23146,11 @@ public int hashCode() { hash = (37 * hash) + TABLE_FIELD_NUMBER; hash = (53 * hash) + getTableList().hashCode(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -26651,80 +23174,65 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Ev } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom( 
com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** + * Protobuf type {@code EvictEntityRequestProto} + * *
      * Used for proactive eviction requests. Must contain exactly one DB name and, optionally, table information.
      * 
- * - * Protobuf type {@code EvictEntityRequestProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:EvictEntityRequestProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -26737,17 +23245,19 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getTableFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); dbName_ = ""; @@ -26761,18 +23271,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityRequestProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto result = buildPartial(); if (!result.isInitialized()) { @@ -26781,17 +23292,16 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEnti return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.dbName_ = dbName_; if (tableBuilder_ == null) { - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { table_ = java.util.Collections.unmodifiableList(table_); bitField0_ = (bitField0_ & ~0x00000002); } @@ -26804,39 +23314,6 @@ public 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEnti return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto)other); @@ -26872,32 +23349,31 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc table_ = other.table_; bitField0_ = (bitField0_ & ~0x00000002); tableBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getTableFieldBuilder() : null; } else { tableBuilder_.addAllMessages(other.table_); } } } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { if (!hasDbName()) { + return false; } for (int i = 0; i < getTableCount(); i++) { if (!getTable(i).isInitialized()) { + return false; } } return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -26907,7 +23383,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -26917,27 +23393,23 @@ public Builder mergeFrom( } private int bitField0_; + // required string db_name = 1; private java.lang.Object dbName_ = ""; /** * required string db_name = 1; - * @return Whether the dbName field is set. */ public boolean hasDbName() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string db_name = 1; - * @return The dbName. 
*/ public java.lang.String getDbName() { java.lang.Object ref = dbName_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - dbName_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + dbName_ = s; return s; } else { return (java.lang.String) ref; @@ -26945,7 +23417,6 @@ public java.lang.String getDbName() { } /** * required string db_name = 1; - * @return The bytes for dbName. */ public com.google.protobuf.ByteString getDbNameBytes() { @@ -26962,8 +23433,6 @@ public java.lang.String getDbName() { } /** * required string db_name = 1; - * @param value The dbName to set. - * @return This builder for chaining. */ public Builder setDbName( java.lang.String value) { @@ -26977,7 +23446,6 @@ public Builder setDbName( } /** * required string db_name = 1; - * @return This builder for chaining. */ public Builder clearDbName() { bitField0_ = (bitField0_ & ~0x00000001); @@ -26987,8 +23455,6 @@ public Builder clearDbName() { } /** * required string db_name = 1; - * @param value The bytes for dbName to set. - * @return This builder for chaining. */ public Builder setDbNameBytes( com.google.protobuf.ByteString value) { @@ -27001,16 +23467,17 @@ public Builder setDbNameBytes( return this; } + // repeated .TableProto table = 2; private java.util.List table_ = java.util.Collections.emptyList(); private void ensureTableIsMutable() { - if (!((bitField0_ & 0x00000002) != 0)) { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { table_ = new java.util.ArrayList(table_); bitField0_ |= 0x00000002; } } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder> tableBuilder_; /** @@ -27142,8 +23609,7 @@ public Builder addAllTable( java.lang.Iterable values) { if (tableBuilder_ == null) { ensureTableIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, table_); + super.addAll(values, table_); onChanged(); } else { tableBuilder_.addAllMessages(values); @@ -27226,189 +23692,130 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProt getTableBuilderList() { return getTableFieldBuilder().getBuilderList(); } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder> getTableFieldBuilder() { if (tableBuilder_ == null) { - tableBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + tableBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder>( table_, - ((bitField0_ & 0x00000002) != 0), + ((bitField0_ & 0x00000002) == 0x00000002), getParentForChildren(), isClean()); table_ = null; } return tableBuilder_; } - @java.lang.Override - public final Builder setUnknownFields( - final 
com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:EvictEntityRequestProto) } - // @@protoc_insertion_point(class_scope:EvictEntityRequestProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public EvictEntityRequestProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new EvictEntityRequestProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityRequestProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new EvictEntityRequestProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:EvictEntityRequestProto) } - public interface TableProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:TableProto) - com.google.protobuf.MessageOrBuilder { + public interface TableProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // required string table_name = 1; /** * required string table_name = 1; - * @return Whether the tableName field is set. */ boolean hasTableName(); /** * required string table_name = 1; - * @return The tableName. */ java.lang.String getTableName(); /** * required string table_name = 1; - * @return The bytes for tableName. */ com.google.protobuf.ByteString getTableNameBytes(); + // repeated string part_key = 2; /** * repeated string part_key = 2; - * @return A list containing the partKey. */ java.util.List - getPartKeyList(); + getPartKeyList(); /** * repeated string part_key = 2; - * @return The count of partKey. */ int getPartKeyCount(); /** * repeated string part_key = 2; - * @param index The index of the element to return. - * @return The partKey at the given index. */ java.lang.String getPartKey(int index); /** * repeated string part_key = 2; - * @param index The index of the value to return. - * @return The bytes of the partKey at the given index. */ com.google.protobuf.ByteString getPartKeyBytes(int index); + // repeated string part_val = 3; /** * repeated string part_val = 3; - * @return A list containing the partVal. */ java.util.List - getPartValList(); + getPartValList(); /** * repeated string part_val = 3; - * @return The count of partVal. */ int getPartValCount(); /** * repeated string part_val = 3; - * @param index The index of the element to return. - * @return The partVal at the given index. 
*/ java.lang.String getPartVal(int index); /** * repeated string part_val = 3; - * @param index The index of the value to return. - * @return The bytes of the partVal at the given index. */ com.google.protobuf.ByteString getPartValBytes(int index); } /** + * Protobuf type {@code TableProto} + * *
    * Used in EvictEntityRequestProto; it works for both non-partitioned and partitioned tables.
    * For partitioned tables, part_key contains only the partition keys, while part_val holds the values for all partitions across all keys:
    * e.g.: for partitions pk0=p00/pk1=p01/pk2=p02 and pk0=p10/pk1=p11/pk2=p12
    * part_key: [pk0, pk1, pk2], part_val: [p00, p01, p02, p10, p11, p12]
    * 
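// Hedged usage sketch (annotation only, not part of the generated file): the flattened
// partition encoding described above, with made-up table and partition names. The
// builder methods (newBuilder, setTableName, addAllPartKey, addAllPartVal, build) are
// the ones generated for this message.
TableProto partitionedTable = TableProto.newBuilder()
    .setTableName("store_sales")                                   // required table_name
    .addAllPartKey(java.util.Arrays.asList("pk0", "pk1", "pk2"))   // each partition key listed once
    .addAllPartVal(java.util.Arrays.asList(
        "p00", "p01", "p02",   // values of partition pk0=p00/pk1=p01/pk2=p02
        "p10", "p11", "p12"))  // values of partition pk0=p10/pk1=p11/pk2=p12
    .build();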
- * - * Protobuf type {@code TableProto} */ public static final class TableProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:TableProto) - TableProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements TableProtoOrBuilder { // Use TableProto.newBuilder() to construct. - private TableProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private TableProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private TableProto() { - tableName_ = ""; - partKey_ = com.google.protobuf.LazyStringArrayList.EMPTY; - partVal_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private TableProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final TableProto defaultInstance; + public static TableProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new TableProto(); + public TableProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private TableProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -27420,52 +23827,47 @@ private TableProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - tableName_ = bs; + tableName_ = input.readBytes(); break; } case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); - if (!((mutable_bitField0_ & 0x00000002) != 0)) { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { partKey_ = new com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000002; } - partKey_.add(bs); + partKey_.add(input.readBytes()); break; } case 26: { - com.google.protobuf.ByteString bs = input.readBytes(); - if (!((mutable_bitField0_ & 0x00000004) != 0)) { + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { partVal_ = new com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000004; } - partVal_.add(bs); - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } + partVal_.add(input.readBytes()); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000002) != 0)) { - 
partKey_ = partKey_.getUnmodifiableView(); + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + partKey_ = new com.google.protobuf.UnmodifiableLazyStringList(partKey_); } - if (((mutable_bitField0_ & 0x00000004) != 0)) { - partVal_ = partVal_.getUnmodifiableView(); + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + partVal_ = new com.google.protobuf.UnmodifiableLazyStringList(partVal_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -27476,30 +23878,41 @@ private TableProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TableProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TableProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public TableProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new TableProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // required string table_name = 1; public static final int TABLE_NAME_FIELD_NUMBER = 1; - private volatile java.lang.Object tableName_; + private java.lang.Object tableName_; /** * required string table_name = 1; - * @return Whether the tableName field is set. */ - @java.lang.Override public boolean hasTableName() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string table_name = 1; - * @return The tableName. */ - @java.lang.Override public java.lang.String getTableName() { java.lang.Object ref = tableName_; if (ref instanceof java.lang.String) { @@ -27516,9 +23929,7 @@ public java.lang.String getTableName() { } /** * required string table_name = 1; - * @return The bytes for tableName. */ - @java.lang.Override public com.google.protobuf.ByteString getTableNameBytes() { java.lang.Object ref = tableName_; @@ -27533,82 +23944,75 @@ public java.lang.String getTableName() { } } + // repeated string part_key = 2; public static final int PART_KEY_FIELD_NUMBER = 2; private com.google.protobuf.LazyStringList partKey_; /** * repeated string part_key = 2; - * @return A list containing the partKey. */ - public com.google.protobuf.ProtocolStringList + public java.util.List getPartKeyList() { return partKey_; } /** * repeated string part_key = 2; - * @return The count of partKey. */ public int getPartKeyCount() { return partKey_.size(); } /** * repeated string part_key = 2; - * @param index The index of the element to return. - * @return The partKey at the given index. */ public java.lang.String getPartKey(int index) { return partKey_.get(index); } /** * repeated string part_key = 2; - * @param index The index of the value to return. - * @return The bytes of the partKey at the given index. 
*/ public com.google.protobuf.ByteString getPartKeyBytes(int index) { return partKey_.getByteString(index); } + // repeated string part_val = 3; public static final int PART_VAL_FIELD_NUMBER = 3; private com.google.protobuf.LazyStringList partVal_; /** * repeated string part_val = 3; - * @return A list containing the partVal. */ - public com.google.protobuf.ProtocolStringList + public java.util.List getPartValList() { return partVal_; } /** * repeated string part_val = 3; - * @return The count of partVal. */ public int getPartValCount() { return partVal_.size(); } /** * repeated string part_val = 3; - * @param index The index of the element to return. - * @return The partVal at the given index. */ public java.lang.String getPartVal(int index) { return partVal_.get(index); } /** * repeated string part_val = 3; - * @param index The index of the value to return. - * @return The bytes of the partVal at the given index. */ public com.google.protobuf.ByteString getPartValBytes(int index) { return partVal_.getByteString(index); } + private void initFields() { + tableName_ = ""; + partKey_ = com.google.protobuf.LazyStringArrayList.EMPTY; + partVal_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; if (!hasTableName()) { memoizedIsInitialized = 0; @@ -27618,34 +24022,36 @@ public final boolean isInitialized() { return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, tableName_); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getTableNameBytes()); } for (int i = 0; i < partKey_.size(); i++) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, partKey_.getRaw(i)); + output.writeBytes(2, partKey_.getByteString(i)); } for (int i = 0; i < partVal_.size(); i++) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 3, partVal_.getRaw(i)); + output.writeBytes(3, partVal_.getByteString(i)); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, tableName_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getTableNameBytes()); } { int dataSize = 0; for (int i = 0; i < partKey_.size(); i++) { - dataSize += computeStringSizeNoTag(partKey_.getRaw(i)); + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(partKey_.getByteString(i)); } size += dataSize; size += 1 * getPartKeyList().size(); @@ -27653,16 +24059,24 @@ public int getSerializedSize() { { int dataSize = 0; for (int i = 0; i < partVal_.size(); i++) { - dataSize += computeStringSizeNoTag(partVal_.getRaw(i)); + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(partVal_.getByteString(i)); } size += dataSize; size += 1 * getPartValList().size(); } - size += 
unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -27673,26 +24087,29 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto) obj; - if (hasTableName() != other.hasTableName()) return false; + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); if (hasTableName()) { - if (!getTableName() - .equals(other.getTableName())) return false; - } - if (!getPartKeyList() - .equals(other.getPartKeyList())) return false; - if (!getPartValList() - .equals(other.getPartValList())) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && getTableName() + .equals(other.getTableName()); + } + result = result && getPartKeyList() + .equals(other.getPartKeyList()); + result = result && getPartValList() + .equals(other.getPartValList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasTableName()) { hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; hash = (53 * hash) + getTableName().hashCode(); @@ -27705,22 +24122,11 @@ public int hashCode() { hash = (37 * hash) + PART_VAL_FIELD_NUMBER; hash = (53 * hash) + getPartValList().hashCode(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -27744,83 +24150,68 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Ta } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - 
.parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } /** + * Protobuf type {@code TableProto} + * *
      * Used in EvictEntityRequestProto; it works for both non-partitioned and partitioned tables.
      * For partitioned tables, part_key contains only the partition keys, while part_val holds the values for all partitions across all keys:
      * e.g.: for partitions pk0=p00/pk1=p01/pk2=p02 and pk0=p10/pk1=p11/pk2=p12
      * part_key: [pk0, pk1, pk2], part_val: [p00, p01, p02, p10, p11, p12]
      * 
- * - * Protobuf type {@code TableProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:TableProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TableProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TableProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -27833,16 +24224,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); tableName_ = ""; @@ -27854,18 +24247,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_TableProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto result = buildPartial(); if (!result.isInitialized()) { @@ -27874,22 +24268,23 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProt return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.tableName_ = tableName_; - if (((bitField0_ & 0x00000002) != 0)) { - partKey_ = partKey_.getUnmodifiableView(); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + partKey_ = new com.google.protobuf.UnmodifiableLazyStringList( + partKey_); bitField0_ = (bitField0_ & ~0x00000002); } result.partKey_ = partKey_; - if (((bitField0_ & 0x00000004) != 0)) { - partVal_ = partVal_.getUnmodifiableView(); + if (((bitField0_ & 0x00000004) == 0x00000004)) { + partVal_ = new com.google.protobuf.UnmodifiableLazyStringList( + partVal_); bitField0_ 
= (bitField0_ & ~0x00000004); } result.partVal_ = partVal_; @@ -27898,39 +24293,6 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProt return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto)other); @@ -27967,20 +24329,18 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc } onChanged(); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { if (!hasTableName()) { + return false; } return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -27990,7 +24350,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -28000,27 +24360,23 @@ public Builder mergeFrom( } private int bitField0_; + // required string table_name = 1; private java.lang.Object tableName_ = ""; /** * required string table_name = 1; - * @return Whether the tableName field is set. */ public boolean hasTableName() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required string table_name = 1; - * @return The tableName. */ public java.lang.String getTableName() { java.lang.Object ref = tableName_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - tableName_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + tableName_ = s; return s; } else { return (java.lang.String) ref; @@ -28028,7 +24384,6 @@ public java.lang.String getTableName() { } /** * required string table_name = 1; - * @return The bytes for tableName. 
*/ public com.google.protobuf.ByteString getTableNameBytes() { @@ -28045,8 +24400,6 @@ public java.lang.String getTableName() { } /** * required string table_name = 1; - * @param value The tableName to set. - * @return This builder for chaining. */ public Builder setTableName( java.lang.String value) { @@ -28060,7 +24413,6 @@ public Builder setTableName( } /** * required string table_name = 1; - * @return This builder for chaining. */ public Builder clearTableName() { bitField0_ = (bitField0_ & ~0x00000001); @@ -28070,8 +24422,6 @@ public Builder clearTableName() { } /** * required string table_name = 1; - * @param value The bytes for tableName to set. - * @return This builder for chaining. */ public Builder setTableNameBytes( com.google.protobuf.ByteString value) { @@ -28084,40 +24434,35 @@ public Builder setTableNameBytes( return this; } + // repeated string part_key = 2; private com.google.protobuf.LazyStringList partKey_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensurePartKeyIsMutable() { - if (!((bitField0_ & 0x00000002) != 0)) { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { partKey_ = new com.google.protobuf.LazyStringArrayList(partKey_); bitField0_ |= 0x00000002; } } /** * repeated string part_key = 2; - * @return A list containing the partKey. */ - public com.google.protobuf.ProtocolStringList + public java.util.List getPartKeyList() { - return partKey_.getUnmodifiableView(); + return java.util.Collections.unmodifiableList(partKey_); } /** * repeated string part_key = 2; - * @return The count of partKey. */ public int getPartKeyCount() { return partKey_.size(); } /** * repeated string part_key = 2; - * @param index The index of the element to return. - * @return The partKey at the given index. */ public java.lang.String getPartKey(int index) { return partKey_.get(index); } /** * repeated string part_key = 2; - * @param index The index of the value to return. - * @return The bytes of the partKey at the given index. */ public com.google.protobuf.ByteString getPartKeyBytes(int index) { @@ -28125,9 +24470,6 @@ public java.lang.String getPartKey(int index) { } /** * repeated string part_key = 2; - * @param index The index to set the value at. - * @param value The partKey to set. - * @return This builder for chaining. */ public Builder setPartKey( int index, java.lang.String value) { @@ -28141,8 +24483,6 @@ public Builder setPartKey( } /** * repeated string part_key = 2; - * @param value The partKey to add. - * @return This builder for chaining. */ public Builder addPartKey( java.lang.String value) { @@ -28156,20 +24496,16 @@ public Builder addPartKey( } /** * repeated string part_key = 2; - * @param values The partKey to add. - * @return This builder for chaining. */ public Builder addAllPartKey( java.lang.Iterable values) { ensurePartKeyIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, partKey_); + super.addAll(values, partKey_); onChanged(); return this; } /** * repeated string part_key = 2; - * @return This builder for chaining. */ public Builder clearPartKey() { partKey_ = com.google.protobuf.LazyStringArrayList.EMPTY; @@ -28179,8 +24515,6 @@ public Builder clearPartKey() { } /** * repeated string part_key = 2; - * @param value The bytes of the partKey to add. - * @return This builder for chaining. 
*/ public Builder addPartKeyBytes( com.google.protobuf.ByteString value) { @@ -28193,40 +24527,35 @@ public Builder addPartKeyBytes( return this; } + // repeated string part_val = 3; private com.google.protobuf.LazyStringList partVal_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensurePartValIsMutable() { - if (!((bitField0_ & 0x00000004) != 0)) { + if (!((bitField0_ & 0x00000004) == 0x00000004)) { partVal_ = new com.google.protobuf.LazyStringArrayList(partVal_); bitField0_ |= 0x00000004; } } /** * repeated string part_val = 3; - * @return A list containing the partVal. */ - public com.google.protobuf.ProtocolStringList + public java.util.List getPartValList() { - return partVal_.getUnmodifiableView(); + return java.util.Collections.unmodifiableList(partVal_); } /** * repeated string part_val = 3; - * @return The count of partVal. */ public int getPartValCount() { return partVal_.size(); } /** * repeated string part_val = 3; - * @param index The index of the element to return. - * @return The partVal at the given index. */ public java.lang.String getPartVal(int index) { return partVal_.get(index); } /** * repeated string part_val = 3; - * @param index The index of the value to return. - * @return The bytes of the partVal at the given index. */ public com.google.protobuf.ByteString getPartValBytes(int index) { @@ -28234,9 +24563,6 @@ public java.lang.String getPartVal(int index) { } /** * repeated string part_val = 3; - * @param index The index to set the value at. - * @param value The partVal to set. - * @return This builder for chaining. */ public Builder setPartVal( int index, java.lang.String value) { @@ -28250,8 +24576,6 @@ public Builder setPartVal( } /** * repeated string part_val = 3; - * @param value The partVal to add. - * @return This builder for chaining. */ public Builder addPartVal( java.lang.String value) { @@ -28265,20 +24589,16 @@ public Builder addPartVal( } /** * repeated string part_val = 3; - * @param values The partVal to add. - * @return This builder for chaining. */ public Builder addAllPartVal( java.lang.Iterable values) { ensurePartValIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, partVal_); + super.addAll(values, partVal_); onChanged(); return this; } /** * repeated string part_val = 3; - * @return This builder for chaining. */ public Builder clearPartVal() { partVal_ = com.google.protobuf.LazyStringArrayList.EMPTY; @@ -28288,8 +24608,6 @@ public Builder clearPartVal() { } /** * repeated string part_val = 3; - * @param value The bytes of the partVal to add. - * @return This builder for chaining. 
*/ public Builder addPartValBytes( com.google.protobuf.ByteString value) { @@ -28301,71 +24619,28 @@ public Builder addPartValBytes( onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:TableProto) } - // @@protoc_insertion_point(class_scope:TableProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public TableProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new TableProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new TableProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:TableProto) } - public interface EvictEntityResponseProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:EvictEntityResponseProto) - com.google.protobuf.MessageOrBuilder { + public interface EvictEntityResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // required int64 evicted_bytes = 1; /** * required int64 evicted_bytes = 1; - * @return Whether the evictedBytes field is set. */ boolean hasEvictedBytes(); /** * required int64 evicted_bytes = 1; - * @return The evictedBytes. */ long getEvictedBytes(); } @@ -28373,37 +24648,35 @@ public interface EvictEntityResponseProtoOrBuilder extends * Protobuf type {@code EvictEntityResponseProto} */ public static final class EvictEntityResponseProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:EvictEntityResponseProto) - EvictEntityResponseProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements EvictEntityResponseProtoOrBuilder { // Use EvictEntityResponseProto.newBuilder() to construct. 
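The TableProto hunks above swap the protobuf-java 3.x repeated-string accessors (ProtocolStringList, getUnmodifiableView) for the 2.5-era equivalents (java.util.List backed by Collections.unmodifiableList). A minimal caller-side sketch, using only the accessor names visible in the hunks and hypothetical values, suggests why call sites that program against java.util.List stay source compatible with either generation:

// Illustrative only: the class name and field values below are not part of the patch.
import java.util.List;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.TableProto;

public class RepeatedFieldExample {
  public static void main(String[] args) {
    TableProto table = TableProto.newBuilder()
        .setTableName("web_logs")        // required string table_name = 1
        .addPartKey("ds")                // repeated string part_key = 2
        .addPartVal("2024-01-01")        // repeated string part_val = 3
        .build();

    // protobuf-java 3.x returns ProtocolStringList (a List<String> subtype);
    // the 2.5-style code returns List<String> directly, so declaring the
    // variable as List<String> works unchanged against both variants.
    List<String> partKeys = table.getPartKeyList();
    String firstVal = table.getPartVal(0);
    System.out.println(partKeys + " -> " + firstVal);
  }
}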
- private EvictEntityResponseProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private EvictEntityResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private EvictEntityResponseProto() { + private EvictEntityResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final EvictEntityResponseProto defaultInstance; + public static EvictEntityResponseProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new EvictEntityResponseProto(); + public EvictEntityResponseProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private EvictEntityResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -28415,27 +24688,25 @@ private EvictEntityResponseProto( case 0: done = true; break; - case 8: { - bitField0_ |= 0x00000001; - evictedBytes_ = input.readInt64(); - break; - } default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { done = true; } break; } + case 8: { + bitField0_ |= 0x00000001; + evictedBytes_ = input.readInt64(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -28446,40 +24717,52 @@ private EvictEntityResponseProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public EvictEntityResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new 
EvictEntityResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // required int64 evicted_bytes = 1; public static final int EVICTED_BYTES_FIELD_NUMBER = 1; private long evictedBytes_; /** * required int64 evicted_bytes = 1; - * @return Whether the evictedBytes field is set. */ - @java.lang.Override public boolean hasEvictedBytes() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 evicted_bytes = 1; - * @return The evictedBytes. */ - @java.lang.Override public long getEvictedBytes() { return evictedBytes_; } + private void initFields() { + evictedBytes_ = 0L; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; if (!hasEvictedBytes()) { memoizedIsInitialized = 0; @@ -28489,30 +24772,37 @@ public final boolean isInitialized() { return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, evictedBytes_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(1, evictedBytes_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -28523,43 +24813,34 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto) obj; - if (hasEvictedBytes() != other.hasEvictedBytes()) return false; + boolean result = true; + result = result && (hasEvictedBytes() == other.hasEvictedBytes()); if (hasEvictedBytes()) { - if (getEvictedBytes() - != other.getEvictedBytes()) return false; + result = result && (getEvictedBytes() + == other.getEvictedBytes()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasEvictedBytes()) { hash = (37 * hash) + EVICTED_BYTES_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getEvictedBytes()); + hash = 
(53 * hash) + hashLong(getEvictedBytes()); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -28583,59 +24864,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Ev } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder 
newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -28643,16 +24911,14 @@ protected Builder newBuilderForType( * Protobuf type {@code EvictEntityResponseProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:EvictEntityResponseProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -28665,16 +24931,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); evictedBytes_ = 0L; @@ -28682,18 +24950,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_EvictEntityResponseProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto result = buildPartial(); if (!result.isInitialized()) { @@ -28702,53 +24971,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEnti return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto buildPartial() { 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { - result.evictedBytes_ = evictedBytes_; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } + result.evictedBytes_ = evictedBytes_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto)other); @@ -28763,20 +24998,18 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasEvictedBytes()) { setEvictedBytes(other.getEvictedBytes()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { if (!hasEvictedBytes()) { + return false; } return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -28786,7 +25019,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -28796,27 +25029,22 @@ public Builder mergeFrom( } private int bitField0_; + // required int64 evicted_bytes = 1; private long evictedBytes_ ; /** * required int64 evicted_bytes = 1; - * @return Whether the evictedBytes field is set. */ - @java.lang.Override public boolean hasEvictedBytes() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * required int64 evicted_bytes = 1; - * @return The evictedBytes. */ - @java.lang.Override public long getEvictedBytes() { return evictedBytes_; } /** * required int64 evicted_bytes = 1; - * @param value The evictedBytes to set. - * @return This builder for chaining. 
*/ public Builder setEvictedBytes(long value) { bitField0_ |= 0x00000001; @@ -28826,7 +25054,6 @@ public Builder setEvictedBytes(long value) { } /** * required int64 evicted_bytes = 1; - * @return This builder for chaining. */ public Builder clearEvictedBytes() { bitField0_ = (bitField0_ & ~0x00000001); @@ -28834,98 +25061,54 @@ public Builder clearEvictedBytes() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:EvictEntityResponseProto) } - // @@protoc_insertion_point(class_scope:EvictEntityResponseProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public EvictEntityResponseProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new EvictEntityResponseProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new EvictEntityResponseProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:EvictEntityResponseProto) } - public interface GetCacheContentRequestProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:GetCacheContentRequestProto) - com.google.protobuf.MessageOrBuilder { + public interface GetCacheContentRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code GetCacheContentRequestProto} */ public static final class GetCacheContentRequestProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:GetCacheContentRequestProto) - GetCacheContentRequestProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements GetCacheContentRequestProtoOrBuilder { // Use GetCacheContentRequestProto.newBuilder() to construct. 
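Both generated variants of EvictEntityResponseProto expose the same public surface (newBuilder, getDefaultInstance, parseFrom), even though the 3.x code backs it with a DEFAULT_INSTANCE constant and a deprecated static parser while the 2.5 style wires up defaultInstance/initFields in a static block. A round-trip sketch under that assumption; only the message and field names come from the hunks above, the rest is illustrative:

import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.EvictEntityResponseProto;

public class EvictEntityRoundTrip {
  public static void main(String[] args) throws Exception {
    // required int64 evicted_bytes = 1 must be set, otherwise build() trips
    // the isInitialized() check in either code generation.
    EvictEntityResponseProto response = EvictEntityResponseProto.newBuilder()
        .setEvictedBytes(4096L)          // hypothetical value
        .build();

    // The wire format is unaffected by the generator change: bytes written by
    // one variant parse with the other.
    byte[] wire = response.toByteArray();
    EvictEntityResponseProto parsed = EvictEntityResponseProto.parseFrom(wire);
    System.out.println(parsed.getEvictedBytes());   // 4096
  }
}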
- private GetCacheContentRequestProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private GetCacheContentRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private GetCacheContentRequestProto() { + private GetCacheContentRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetCacheContentRequestProto defaultInstance; + public static GetCacheContentRequestProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new GetCacheContentRequestProto(); + public GetCacheContentRequestProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private GetCacheContentRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -28937,8 +25120,8 @@ private GetCacheContentRequestProto( done = true; break; default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { done = true; } break; @@ -28947,11 +25130,9 @@ private GetCacheContentRequestProto( } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -28962,42 +25143,63 @@ private GetCacheContentRequestProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetCacheContentRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetCacheContentRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto.Builder.class); } - private byte memoizedIsInitialized = -1; + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetCacheContentRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetCacheContentRequestProto(input, extensionRegistry); + } + }; + 
@java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - unknownFields.writeTo(output); + getSerializedSize(); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -29008,33 +25210,25 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto) obj; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (29 * hash) + unknownFields.hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -29058,59 +25252,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Ge } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto parseFrom( java.io.InputStream input, 
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -29118,16 +25299,14 @@ protected Builder newBuilderForType( * Protobuf type {@code GetCacheContentRequestProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:GetCacheContentRequestProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetCacheContentRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetCacheContentRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -29140,33 +25319,36 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetCacheContentRequestProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto result = buildPartial(); if (!result.isInitialized()) { @@ -29175,46 +25357,12 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheC return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto(this); onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - 
@java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto)other); @@ -29226,17 +25374,14 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto other) { if (other == org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -29246,7 +25391,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -29254,71 +25399,28 @@ public Builder mergeFrom( } return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:GetCacheContentRequestProto) } - // @@protoc_insertion_point(class_scope:GetCacheContentRequestProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public GetCacheContentRequestProto parsePartialFrom( - 
com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new GetCacheContentRequestProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new GetCacheContentRequestProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:GetCacheContentRequestProto) } - public interface GetCacheContentResponseProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:GetCacheContentResponseProto) - com.google.protobuf.MessageOrBuilder { + public interface GetCacheContentResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional .CacheEntryList result = 1; /** * optional .CacheEntryList result = 1; - * @return Whether the result field is set. */ boolean hasResult(); /** * optional .CacheEntryList result = 1; - * @return The result. */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList getResult(); /** @@ -29330,37 +25432,35 @@ public interface GetCacheContentResponseProtoOrBuilder extends * Protobuf type {@code GetCacheContentResponseProto} */ public static final class GetCacheContentResponseProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:GetCacheContentResponseProto) - GetCacheContentResponseProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements GetCacheContentResponseProtoOrBuilder { // Use GetCacheContentResponseProto.newBuilder() to construct. 
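GetCacheContentRequestProto carries no fields, so these hunks mostly change parsing plumbing: the 3.x code routes stream overloads through GeneratedMessageV3.parseWithIOException and adds ByteBuffer overloads, while the 2.5 style delegates straight to PARSER; the InputStream and ByteString entry points keep the same signatures either way. A small sketch, assuming nothing beyond the message name in the hunks:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentRequestProto;

public class EmptyRequestExample {
  public static void main(String[] args) throws Exception {
    // An empty message serializes to zero payload bytes; the delimited form
    // prepends a varint length, which both generations read back identically.
    GetCacheContentRequestProto request = GetCacheContentRequestProto.newBuilder().build();

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    request.writeDelimitedTo(out);

    GetCacheContentRequestProto parsed = GetCacheContentRequestProto
        .parseDelimitedFrom(new ByteArrayInputStream(out.toByteArray()));
    System.out.println(parsed.getSerializedSize());   // 0
  }
}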
- private GetCacheContentResponseProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private GetCacheContentResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private GetCacheContentResponseProto() { + private GetCacheContentResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final GetCacheContentResponseProto defaultInstance; + public static GetCacheContentResponseProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new GetCacheContentResponseProto(); + public GetCacheContentResponseProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private GetCacheContentResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -29372,9 +25472,16 @@ private GetCacheContentResponseProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = result_.toBuilder(); } result_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.PARSER, extensionRegistry); @@ -29385,22 +25492,13 @@ private GetCacheContentResponseProto( bitField0_ |= 0x00000001; break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -29411,76 +25509,94 @@ private GetCacheContentResponseProto( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetCacheContentResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetCacheContentResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto.class, 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public GetCacheContentResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new GetCacheContentResponseProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional .CacheEntryList result = 1; public static final int RESULT_FIELD_NUMBER = 1; private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList result_; /** * optional .CacheEntryList result = 1; - * @return Whether the result field is set. */ - @java.lang.Override public boolean hasResult() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .CacheEntryList result = 1; - * @return The result. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList getResult() { - return result_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.getDefaultInstance() : result_; + return result_; } /** * optional .CacheEntryList result = 1; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryListOrBuilder getResultOrBuilder() { - return result_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.getDefaultInstance() : result_; + return result_; } + private void initFields() { + result_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.getDefaultInstance(); + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - output.writeMessage(1, getResult()); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(1, result_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, getResult()); + .computeMessageSize(1, result_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -29491,42 +25607,34 @@ public boolean equals(final java.lang.Object obj) 
{ } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto) obj; - if (hasResult() != other.hasResult()) return false; + boolean result = true; + result = result && (hasResult() == other.hasResult()); if (hasResult()) { - if (!getResult() - .equals(other.getResult())) return false; + result = result && getResult() + .equals(other.getResult()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasResult()) { hash = (37 * hash) + RESULT_FIELD_NUMBER; hash = (53 * hash) + getResult().hashCode(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -29550,59 +25658,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Ge } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -29610,16 +25705,14 @@ protected Builder newBuilderForType( * Protobuf type {@code GetCacheContentResponseProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:GetCacheContentResponseProto) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetCacheContentResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetCacheContentResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -29632,21 +25725,23 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getResultFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); if (resultBuilder_ == null) { - result_ = null; + result_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.getDefaultInstance(); } else { 
resultBuilder_.clear(); } @@ -29654,18 +25749,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_GetCacheContentResponseProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto result = buildPartial(); if (!result.isInitialized()) { @@ -29674,57 +25770,23 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheC return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { - if (resultBuilder_ == null) { - result.result_ = result_; - } else { - result.result_ = resultBuilder_.build(); - } + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } + if (resultBuilder_ == null) { + result.result_ = result_; + } else { + result.result_ = resultBuilder_.build(); + } result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto)other); @@ -29739,17 +25801,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasResult()) { mergeResult(other.getResult()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return 
true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -29759,7 +25818,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -29769,23 +25828,22 @@ public Builder mergeFrom( } private int bitField0_; - private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList result_; - private com.google.protobuf.SingleFieldBuilderV3< + // optional .CacheEntryList result = 1; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList result_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryListOrBuilder> resultBuilder_; /** * optional .CacheEntryList result = 1; - * @return Whether the result field is set. */ public boolean hasResult() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .CacheEntryList result = 1; - * @return The result. */ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList getResult() { if (resultBuilder_ == null) { - return result_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.getDefaultInstance() : result_; + return result_; } else { return resultBuilder_.getMessage(); } @@ -29825,8 +25883,7 @@ public Builder setResult( */ public Builder mergeResult(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList value) { if (resultBuilder_ == null) { - if (((bitField0_ & 0x00000001) != 0) && - result_ != null && + if (((bitField0_ & 0x00000001) == 0x00000001) && result_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.getDefaultInstance()) { result_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.newBuilder(result_).mergeFrom(value).buildPartial(); @@ -29845,7 +25902,7 @@ public Builder mergeResult(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProt */ public Builder clearResult() { if (resultBuilder_ == null) { - result_ = null; + result_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.getDefaultInstance(); onChanged(); } else { resultBuilder_.clear(); @@ -29868,83 +25925,41 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntr if (resultBuilder_ != null) { return resultBuilder_.getMessageOrBuilder(); } else { - return result_ == null ? 
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.getDefaultInstance() : result_; + return result_; } } /** * optional .CacheEntryList result = 1; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryListOrBuilder> getResultFieldBuilder() { if (resultBuilder_ == null) { - resultBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + resultBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryListOrBuilder>( - getResult(), + result_, getParentForChildren(), isClean()); result_ = null; } return resultBuilder_; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:GetCacheContentResponseProto) } - // @@protoc_insertion_point(class_scope:GetCacheContentResponseProto) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public GetCacheContentResponseProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new GetCacheContentResponseProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheContentResponseProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new GetCacheContentResponseProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:GetCacheContentResponseProto) } - public interface CacheEntryListOrBuilder extends - // @@protoc_insertion_point(interface_extends:CacheEntryList) - com.google.protobuf.MessageOrBuilder { + public interface CacheEntryListOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // repeated .CacheEntry entries = 1; /** * repeated .CacheEntry entries = 1; */ @@ -29973,38 +25988,35 @@ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryOrBuil * Protobuf type {@code CacheEntryList} */ public static final class CacheEntryList extends - 
com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:CacheEntryList) - CacheEntryListOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements CacheEntryListOrBuilder { // Use CacheEntryList.newBuilder() to construct. - private CacheEntryList(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private CacheEntryList(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private CacheEntryList() { - entries_ = java.util.Collections.emptyList(); + private CacheEntryList(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final CacheEntryList defaultInstance; + public static CacheEntryList getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new CacheEntryList(); + public CacheEntryList getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private CacheEntryList( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -30016,33 +26028,30 @@ private CacheEntryList( case 0: done = true; break; - case 10: { - if (!((mutable_bitField0_ & 0x00000001) != 0)) { - entries_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; } - entries_.add( - input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry.PARSER, extensionRegistry)); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; + case 10: { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { + entries_ = new java.util.ArrayList(); + mutable_bitField0_ |= 0x00000001; } + entries_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry.PARSER, extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000001) != 0)) { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { entries_ = java.util.Collections.unmodifiableList(entries_); } this.unknownFields = unknownFields.build(); @@ -30054,27 +26063,40 @@ private CacheEntryList( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheEntryList_descriptor; } - @java.lang.Override - protected 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheEntryList_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public CacheEntryList parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CacheEntryList(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + // repeated .CacheEntry entries = 1; public static final int ENTRIES_FIELD_NUMBER = 1; private java.util.List entries_; /** * repeated .CacheEntry entries = 1; */ - @java.lang.Override public java.util.List getEntriesList() { return entries_; } /** * repeated .CacheEntry entries = 1; */ - @java.lang.Override public java.util.List getEntriesOrBuilderList() { return entries_; @@ -30082,49 +26104,47 @@ public java.util.Listrepeated .CacheEntry entries = 1; */ - @java.lang.Override public int getEntriesCount() { return entries_.size(); } /** * repeated .CacheEntry entries = 1; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry getEntries(int index) { return entries_.get(index); } /** * repeated .CacheEntry entries = 1; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryOrBuilder getEntriesOrBuilder( int index) { return entries_.get(index); } + private void initFields() { + entries_ = java.util.Collections.emptyList(); + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getSerializedSize(); for (int i = 0; i < entries_.size(); i++) { output.writeMessage(1, entries_.get(i)); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; @@ -30132,11 +26152,18 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, entries_.get(i)); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -30147,39 +26174,31 @@ public boolean equals(final java.lang.Object obj) { } 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList) obj; - if (!getEntriesList() - .equals(other.getEntriesList())) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + boolean result = true; + result = result && getEntriesList() + .equals(other.getEntriesList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (getEntriesCount() > 0) { hash = (37 * hash) + ENTRIES_FIELD_NUMBER; hash = (53 * hash) + getEntriesList().hashCode(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -30203,59 +26222,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Ca } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); 
+ return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -30263,16 +26269,14 @@ protected Builder newBuilderForType( * Protobuf type {@code CacheEntryList} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:CacheEntryList) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryListOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryListOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheEntryList_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheEntryList_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -30285,17 +26289,19 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getEntriesFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); if (entriesBuilder_ == null) { @@ -30307,18 +26313,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheEntryList_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList getDefaultInstanceForType() { return 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList result = buildPartial(); if (!result.isInitialized()) { @@ -30327,12 +26334,11 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntr return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList(this); int from_bitField0_ = bitField0_; if (entriesBuilder_ == null) { - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { entries_ = java.util.Collections.unmodifiableList(entries_); bitField0_ = (bitField0_ & ~0x00000001); } @@ -30344,39 +26350,6 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntr return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList)other); @@ -30407,24 +26380,21 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc entries_ = other.entries_; bitField0_ = (bitField0_ & ~0x00000001); entriesBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
getEntriesFieldBuilder() : null; } else { entriesBuilder_.addAllMessages(other.entries_); } } } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -30434,7 +26404,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -30444,16 +26414,17 @@ public Builder mergeFrom( } private int bitField0_; + // repeated .CacheEntry entries = 1; private java.util.List entries_ = java.util.Collections.emptyList(); private void ensureEntriesIsMutable() { - if (!((bitField0_ & 0x00000001) != 0)) { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { entries_ = new java.util.ArrayList(entries_); bitField0_ |= 0x00000001; } } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryOrBuilder> entriesBuilder_; /** @@ -30585,8 +26556,7 @@ public Builder addAllEntries( java.lang.Iterable values) { if (entriesBuilder_ == null) { ensureEntriesIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, entries_); + super.addAll(values, entries_); onChanged(); } else { entriesBuilder_.addAllMessages(values); @@ -30669,113 +26639,67 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntr getEntriesBuilderList() { return getEntriesFieldBuilder().getBuilderList(); } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryOrBuilder> getEntriesFieldBuilder() { if (entriesBuilder_ == null) { - entriesBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + entriesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryOrBuilder>( entries_, - ((bitField0_ & 0x00000001) != 0), + ((bitField0_ & 0x00000001) == 0x00000001), getParentForChildren(), isClean()); entries_ = null; } return entriesBuilder_; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:CacheEntryList) } - // 
@@protoc_insertion_point(class_scope:CacheEntryList) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public CacheEntryList parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new CacheEntryList(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryList getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new CacheEntryList(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:CacheEntryList) } - public interface CacheEntryOrBuilder extends - // @@protoc_insertion_point(interface_extends:CacheEntry) - com.google.protobuf.MessageOrBuilder { + public interface CacheEntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional bytes file_key = 1; /** * optional bytes file_key = 1; - * @return Whether the fileKey field is set. */ boolean hasFileKey(); /** * optional bytes file_key = 1; - * @return The fileKey. */ com.google.protobuf.ByteString getFileKey(); + // optional string file_path = 2; /** * optional string file_path = 2; - * @return Whether the filePath field is set. */ boolean hasFilePath(); /** * optional string file_path = 2; - * @return The filePath. */ java.lang.String getFilePath(); /** * optional string file_path = 2; - * @return The bytes for filePath. */ com.google.protobuf.ByteString getFilePathBytes(); + // optional .CacheTag cache_tag = 3; /** * optional .CacheTag cache_tag = 3; - * @return Whether the cacheTag field is set. */ boolean hasCacheTag(); /** * optional .CacheTag cache_tag = 3; - * @return The cacheTag. */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag getCacheTag(); /** @@ -30783,6 +26707,7 @@ public interface CacheEntryOrBuilder extends */ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTagOrBuilder getCacheTagOrBuilder(); + // repeated .CacheEntryRange ranges = 4; /** * repeated .CacheEntryRange ranges = 4; */ @@ -30811,40 +26736,35 @@ org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRangeO * Protobuf type {@code CacheEntry} */ public static final class CacheEntry extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:CacheEntry) - CacheEntryOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements CacheEntryOrBuilder { // Use CacheEntry.newBuilder() to construct. 
- private CacheEntry(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private CacheEntry(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private CacheEntry() { - fileKey_ = com.google.protobuf.ByteString.EMPTY; - filePath_ = ""; - ranges_ = java.util.Collections.emptyList(); + private CacheEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final CacheEntry defaultInstance; + public static CacheEntry getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new CacheEntry(); + public CacheEntry getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private CacheEntry( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -30856,20 +26776,26 @@ private CacheEntry( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { bitField0_ |= 0x00000001; fileKey_ = input.readBytes(); break; } case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; - filePath_ = bs; + filePath_ = input.readBytes(); break; } case 26: { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.Builder subBuilder = null; - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { subBuilder = cacheTag_.toBuilder(); } cacheTag_ = input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.PARSER, extensionRegistry); @@ -30881,32 +26807,22 @@ private CacheEntry( break; } case 34: { - if (!((mutable_bitField0_ & 0x00000008) != 0)) { + if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) { ranges_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000008; } - ranges_.add( - input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange.PARSER, extensionRegistry)); - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } + ranges_.add(input.readMessage(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange.PARSER, extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000008) != 0)) { + if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) { ranges_ = 
java.util.Collections.unmodifiableList(ranges_); } this.unknownFields = unknownFields.build(); @@ -30918,49 +26834,57 @@ private CacheEntry( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheEntry_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public CacheEntry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CacheEntry(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional bytes file_key = 1; public static final int FILE_KEY_FIELD_NUMBER = 1; private com.google.protobuf.ByteString fileKey_; /** * optional bytes file_key = 1; - * @return Whether the fileKey field is set. */ - @java.lang.Override public boolean hasFileKey() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional bytes file_key = 1; - * @return The fileKey. */ - @java.lang.Override public com.google.protobuf.ByteString getFileKey() { return fileKey_; } + // optional string file_path = 2; public static final int FILE_PATH_FIELD_NUMBER = 2; - private volatile java.lang.Object filePath_; + private java.lang.Object filePath_; /** * optional string file_path = 2; - * @return Whether the filePath field is set. */ - @java.lang.Override public boolean hasFilePath() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string file_path = 2; - * @return The filePath. */ - @java.lang.Override public java.lang.String getFilePath() { java.lang.Object ref = filePath_; if (ref instanceof java.lang.String) { @@ -30977,9 +26901,7 @@ public java.lang.String getFilePath() { } /** * optional string file_path = 2; - * @return The bytes for filePath. */ - @java.lang.Override public com.google.protobuf.ByteString getFilePathBytes() { java.lang.Object ref = filePath_; @@ -30994,45 +26916,40 @@ public java.lang.String getFilePath() { } } + // optional .CacheTag cache_tag = 3; public static final int CACHE_TAG_FIELD_NUMBER = 3; private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag cacheTag_; /** * optional .CacheTag cache_tag = 3; - * @return Whether the cacheTag field is set. */ - @java.lang.Override public boolean hasCacheTag() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .CacheTag cache_tag = 3; - * @return The cacheTag. */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag getCacheTag() { - return cacheTag_ == null ? 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.getDefaultInstance() : cacheTag_; + return cacheTag_; } /** * optional .CacheTag cache_tag = 3; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTagOrBuilder getCacheTagOrBuilder() { - return cacheTag_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.getDefaultInstance() : cacheTag_; + return cacheTag_; } + // repeated .CacheEntryRange ranges = 4; public static final int RANGES_FIELD_NUMBER = 4; private java.util.List ranges_; /** * repeated .CacheEntryRange ranges = 4; */ - @java.lang.Override public java.util.List getRangesList() { return ranges_; } /** * repeated .CacheEntryRange ranges = 4; */ - @java.lang.Override public java.util.List getRangesOrBuilderList() { return ranges_; @@ -31040,81 +26957,90 @@ public java.util.Listrepeated .CacheEntryRange ranges = 4; */ - @java.lang.Override public int getRangesCount() { return ranges_.size(); } /** * repeated .CacheEntryRange ranges = 4; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange getRanges(int index) { return ranges_.get(index); } /** * repeated .CacheEntryRange ranges = 4; */ - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRangeOrBuilder getRangesOrBuilder( int index) { return ranges_.get(index); } + private void initFields() { + fileKey_ = com.google.protobuf.ByteString.EMPTY; + filePath_ = ""; + cacheTag_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.getDefaultInstance(); + ranges_ = java.util.Collections.emptyList(); + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeBytes(1, fileKey_); } - if (((bitField0_ & 0x00000002) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, filePath_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getFilePathBytes()); } - if (((bitField0_ & 0x00000004) != 0)) { - output.writeMessage(3, getCacheTag()); + if (((bitField0_ & 0x00000004) == 0x00000004)) { + output.writeMessage(3, cacheTag_); } for (int i = 0; i < ranges_.size(); i++) { output.writeMessage(4, ranges_.get(i)); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(1, fileKey_); } - if (((bitField0_ & 0x00000002) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, filePath_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getFilePathBytes()); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 
0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(3, getCacheTag()); + .computeMessageSize(3, cacheTag_); } for (int i = 0; i < ranges_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(4, ranges_.get(i)); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -31125,34 +27051,37 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry) obj; - if (hasFileKey() != other.hasFileKey()) return false; + boolean result = true; + result = result && (hasFileKey() == other.hasFileKey()); if (hasFileKey()) { - if (!getFileKey() - .equals(other.getFileKey())) return false; + result = result && getFileKey() + .equals(other.getFileKey()); } - if (hasFilePath() != other.hasFilePath()) return false; + result = result && (hasFilePath() == other.hasFilePath()); if (hasFilePath()) { - if (!getFilePath() - .equals(other.getFilePath())) return false; + result = result && getFilePath() + .equals(other.getFilePath()); } - if (hasCacheTag() != other.hasCacheTag()) return false; + result = result && (hasCacheTag() == other.hasCacheTag()); if (hasCacheTag()) { - if (!getCacheTag() - .equals(other.getCacheTag())) return false; + result = result && getCacheTag() + .equals(other.getCacheTag()); } - if (!getRangesList() - .equals(other.getRangesList())) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && getRangesList() + .equals(other.getRangesList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasFileKey()) { hash = (37 * hash) + FILE_KEY_FIELD_NUMBER; hash = (53 * hash) + getFileKey().hashCode(); @@ -31169,22 +27098,11 @@ public int hashCode() { hash = (37 * hash) + RANGES_FIELD_NUMBER; hash = (53 * hash) + getRangesList().hashCode(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -31208,59 
+27126,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Ca } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -31268,16 +27173,14 @@ protected Builder newBuilderForType( * Protobuf type {@code CacheEntry} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:CacheEntry) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheEntry_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -31290,18 +27193,20 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getCacheTagFieldBuilder(); getRangesFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); fileKey_ = com.google.protobuf.ByteString.EMPTY; @@ -31309,7 +27214,7 @@ public Builder clear() { filePath_ = ""; bitField0_ = (bitField0_ & ~0x00000002); if (cacheTagBuilder_ == null) { - cacheTag_ = null; + cacheTag_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.getDefaultInstance(); } else { cacheTagBuilder_.clear(); } @@ -31323,18 +27228,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheEntry_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry result = buildPartial(); if (!result.isInitialized()) { @@ -31343,29 +27249,28 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntr return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry buildPartial() { 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.fileKey_ = fileKey_; - if (((from_bitField0_ & 0x00000002) != 0)) { + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.filePath_ = filePath_; - if (((from_bitField0_ & 0x00000004) != 0)) { - if (cacheTagBuilder_ == null) { - result.cacheTag_ = cacheTag_; - } else { - result.cacheTag_ = cacheTagBuilder_.build(); - } + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } + if (cacheTagBuilder_ == null) { + result.cacheTag_ = cacheTag_; + } else { + result.cacheTag_ = cacheTagBuilder_.build(); + } if (rangesBuilder_ == null) { - if (((bitField0_ & 0x00000008) != 0)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { ranges_ = java.util.Collections.unmodifiableList(ranges_); bitField0_ = (bitField0_ & ~0x00000008); } @@ -31378,39 +27283,6 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntr return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry)other); @@ -31452,24 +27324,21 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc ranges_ = other.ranges_; bitField0_ = (bitField0_ & ~0x00000008); rangesBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
getRangesFieldBuilder() : null; } else { rangesBuilder_.addAllMessages(other.ranges_); } } } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -31479,7 +27348,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -31489,27 +27358,22 @@ public Builder mergeFrom( } private int bitField0_; + // optional bytes file_key = 1; private com.google.protobuf.ByteString fileKey_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes file_key = 1; - * @return Whether the fileKey field is set. */ - @java.lang.Override public boolean hasFileKey() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional bytes file_key = 1; - * @return The fileKey. */ - @java.lang.Override public com.google.protobuf.ByteString getFileKey() { return fileKey_; } /** * optional bytes file_key = 1; - * @param value The fileKey to set. - * @return This builder for chaining. */ public Builder setFileKey(com.google.protobuf.ByteString value) { if (value == null) { @@ -31522,7 +27386,6 @@ public Builder setFileKey(com.google.protobuf.ByteString value) { } /** * optional bytes file_key = 1; - * @return This builder for chaining. */ public Builder clearFileKey() { bitField0_ = (bitField0_ & ~0x00000001); @@ -31531,27 +27394,23 @@ public Builder clearFileKey() { return this; } + // optional string file_path = 2; private java.lang.Object filePath_ = ""; /** * optional string file_path = 2; - * @return Whether the filePath field is set. */ public boolean hasFilePath() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string file_path = 2; - * @return The filePath. */ public java.lang.String getFilePath() { java.lang.Object ref = filePath_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - filePath_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + filePath_ = s; return s; } else { return (java.lang.String) ref; @@ -31559,7 +27418,6 @@ public java.lang.String getFilePath() { } /** * optional string file_path = 2; - * @return The bytes for filePath. */ public com.google.protobuf.ByteString getFilePathBytes() { @@ -31576,8 +27434,6 @@ public java.lang.String getFilePath() { } /** * optional string file_path = 2; - * @param value The filePath to set. - * @return This builder for chaining. */ public Builder setFilePath( java.lang.String value) { @@ -31591,7 +27447,6 @@ public Builder setFilePath( } /** * optional string file_path = 2; - * @return This builder for chaining. */ public Builder clearFilePath() { bitField0_ = (bitField0_ & ~0x00000002); @@ -31601,8 +27456,6 @@ public Builder clearFilePath() { } /** * optional string file_path = 2; - * @param value The bytes for filePath to set. 
- * @return This builder for chaining. */ public Builder setFilePathBytes( com.google.protobuf.ByteString value) { @@ -31615,23 +27468,22 @@ public Builder setFilePathBytes( return this; } - private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag cacheTag_; - private com.google.protobuf.SingleFieldBuilderV3< + // optional .CacheTag cache_tag = 3; + private org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag cacheTag_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTagOrBuilder> cacheTagBuilder_; /** * optional .CacheTag cache_tag = 3; - * @return Whether the cacheTag field is set. */ public boolean hasCacheTag() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional .CacheTag cache_tag = 3; - * @return The cacheTag. */ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag getCacheTag() { if (cacheTagBuilder_ == null) { - return cacheTag_ == null ? org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.getDefaultInstance() : cacheTag_; + return cacheTag_; } else { return cacheTagBuilder_.getMessage(); } @@ -31671,8 +27523,7 @@ public Builder setCacheTag( */ public Builder mergeCacheTag(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag value) { if (cacheTagBuilder_ == null) { - if (((bitField0_ & 0x00000004) != 0) && - cacheTag_ != null && + if (((bitField0_ & 0x00000004) == 0x00000004) && cacheTag_ != org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.getDefaultInstance()) { cacheTag_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.newBuilder(cacheTag_).mergeFrom(value).buildPartial(); @@ -31691,7 +27542,7 @@ public Builder mergeCacheTag(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonPr */ public Builder clearCacheTag() { if (cacheTagBuilder_ == null) { - cacheTag_ = null; + cacheTag_ = org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.getDefaultInstance(); onChanged(); } else { cacheTagBuilder_.clear(); @@ -31714,20 +27565,19 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTagO if (cacheTagBuilder_ != null) { return cacheTagBuilder_.getMessageOrBuilder(); } else { - return cacheTag_ == null ? 
- org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.getDefaultInstance() : cacheTag_; + return cacheTag_; } } /** * optional .CacheTag cache_tag = 3; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTagOrBuilder> getCacheTagFieldBuilder() { if (cacheTagBuilder_ == null) { - cacheTagBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + cacheTagBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTagOrBuilder>( - getCacheTag(), + cacheTag_, getParentForChildren(), isClean()); cacheTag_ = null; @@ -31735,16 +27585,17 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTagO return cacheTagBuilder_; } + // repeated .CacheEntryRange ranges = 4; private java.util.List ranges_ = java.util.Collections.emptyList(); private void ensureRangesIsMutable() { - if (!((bitField0_ & 0x00000008) != 0)) { + if (!((bitField0_ & 0x00000008) == 0x00000008)) { ranges_ = new java.util.ArrayList(ranges_); bitField0_ |= 0x00000008; } } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRangeOrBuilder> rangesBuilder_; /** @@ -31876,8 +27727,7 @@ public Builder addAllRanges( java.lang.Iterable values) { if (rangesBuilder_ == null) { ensureRangesIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, ranges_); + super.addAll(values, ranges_); onChanged(); } else { rangesBuilder_.addAllMessages(values); @@ -31960,115 +27810,66 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntr getRangesBuilderList() { return getRangesFieldBuilder().getBuilderList(); } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRangeOrBuilder> getRangesFieldBuilder() { if (rangesBuilder_ == null) { - rangesBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + rangesBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange.Builder, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRangeOrBuilder>( ranges_, - ((bitField0_ & 0x00000008) != 0), + ((bitField0_ & 0x00000008) == 0x00000008), getParentForChildren(), isClean()); ranges_ = null; } return rangesBuilder_; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - 
public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:CacheEntry) } - // @@protoc_insertion_point(class_scope:CacheEntry) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public CacheEntry parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new CacheEntry(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntry getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new CacheEntry(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:CacheEntry) } - public interface CacheTagOrBuilder extends - // @@protoc_insertion_point(interface_extends:CacheTag) - com.google.protobuf.MessageOrBuilder { + public interface CacheTagOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional string table_name = 1; /** * optional string table_name = 1; - * @return Whether the tableName field is set. */ boolean hasTableName(); /** * optional string table_name = 1; - * @return The tableName. */ java.lang.String getTableName(); /** * optional string table_name = 1; - * @return The bytes for tableName. */ com.google.protobuf.ByteString getTableNameBytes(); + // repeated string partition_desc = 2; /** * repeated string partition_desc = 2; - * @return A list containing the partitionDesc. */ java.util.List - getPartitionDescList(); + getPartitionDescList(); /** * repeated string partition_desc = 2; - * @return The count of partitionDesc. */ int getPartitionDescCount(); /** * repeated string partition_desc = 2; - * @param index The index of the element to return. - * @return The partitionDesc at the given index. */ java.lang.String getPartitionDesc(int index); /** * repeated string partition_desc = 2; - * @param index The index of the value to return. - * @return The bytes of the partitionDesc at the given index. */ com.google.protobuf.ByteString getPartitionDescBytes(int index); @@ -32077,39 +27878,35 @@ public interface CacheTagOrBuilder extends * Protobuf type {@code CacheTag} */ public static final class CacheTag extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:CacheTag) - CacheTagOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements CacheTagOrBuilder { // Use CacheTag.newBuilder() to construct. 
- private CacheTag(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private CacheTag(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private CacheTag() { - tableName_ = ""; - partitionDesc_ = com.google.protobuf.LazyStringArrayList.EMPTY; + private CacheTag(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final CacheTag defaultInstance; + public static CacheTag getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new CacheTag(); + public CacheTag getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private CacheTag( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -32121,40 +27918,36 @@ private CacheTag( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - tableName_ = bs; + tableName_ = input.readBytes(); break; } case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); - if (!((mutable_bitField0_ & 0x00000002) != 0)) { + if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) { partitionDesc_ = new com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000002; } - partitionDesc_.add(bs); - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } + partitionDesc_.add(input.readBytes()); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000002) != 0)) { - partitionDesc_ = partitionDesc_.getUnmodifiableView(); + if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) { + partitionDesc_ = new com.google.protobuf.UnmodifiableLazyStringList(partitionDesc_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -32165,30 +27958,41 @@ private CacheTag( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheTag_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheTag_fieldAccessorTable .ensureFieldAccessorsInitialized( 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.class, org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public CacheTag parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CacheTag(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional string table_name = 1; public static final int TABLE_NAME_FIELD_NUMBER = 1; - private volatile java.lang.Object tableName_; + private java.lang.Object tableName_; /** * optional string table_name = 1; - * @return Whether the tableName field is set. */ - @java.lang.Override public boolean hasTableName() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string table_name = 1; - * @return The tableName. */ - @java.lang.Override public java.lang.String getTableName() { java.lang.Object ref = tableName_; if (ref instanceof java.lang.String) { @@ -32205,9 +28009,7 @@ public java.lang.String getTableName() { } /** * optional string table_name = 1; - * @return The bytes for tableName. */ - @java.lang.Override public com.google.protobuf.ByteString getTableNameBytes() { java.lang.Object ref = tableName_; @@ -32222,86 +28024,92 @@ public java.lang.String getTableName() { } } + // repeated string partition_desc = 2; public static final int PARTITION_DESC_FIELD_NUMBER = 2; private com.google.protobuf.LazyStringList partitionDesc_; /** * repeated string partition_desc = 2; - * @return A list containing the partitionDesc. */ - public com.google.protobuf.ProtocolStringList + public java.util.List getPartitionDescList() { return partitionDesc_; } /** * repeated string partition_desc = 2; - * @return The count of partitionDesc. */ public int getPartitionDescCount() { return partitionDesc_.size(); } /** * repeated string partition_desc = 2; - * @param index The index of the element to return. - * @return The partitionDesc at the given index. */ public java.lang.String getPartitionDesc(int index) { return partitionDesc_.get(index); } /** * repeated string partition_desc = 2; - * @param index The index of the value to return. - * @return The bytes of the partitionDesc at the given index. 
*/ public com.google.protobuf.ByteString getPartitionDescBytes(int index) { return partitionDesc_.getByteString(index); } + private void initFields() { + tableName_ = ""; + partitionDesc_ = com.google.protobuf.LazyStringArrayList.EMPTY; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, tableName_); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getTableNameBytes()); } for (int i = 0; i < partitionDesc_.size(); i++) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, partitionDesc_.getRaw(i)); + output.writeBytes(2, partitionDesc_.getByteString(i)); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, tableName_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getTableNameBytes()); } { int dataSize = 0; for (int i = 0; i < partitionDesc_.size(); i++) { - dataSize += computeStringSizeNoTag(partitionDesc_.getRaw(i)); + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(partitionDesc_.getByteString(i)); } size += dataSize; size += 1 * getPartitionDescList().size(); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -32312,24 +28120,27 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag other = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag) obj; - if (hasTableName() != other.hasTableName()) return false; + boolean result = true; + result = result && (hasTableName() == other.hasTableName()); if (hasTableName()) { - if (!getTableName() - .equals(other.getTableName())) return false; + result = result && getTableName() + .equals(other.getTableName()); } - if (!getPartitionDescList() - .equals(other.getPartitionDescList())) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && getPartitionDescList() + .equals(other.getPartitionDescList()); + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + 
getDescriptorForType().hashCode(); if (hasTableName()) { hash = (37 * hash) + TABLE_NAME_FIELD_NUMBER; hash = (53 * hash) + getTableName().hashCode(); @@ -32338,22 +28149,11 @@ public int hashCode() { hash = (37 * hash) + PARTITION_DESC_FIELD_NUMBER; hash = (53 * hash) + getPartitionDescList().hashCode(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -32377,59 +28177,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Ca } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return 
DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -32437,16 +28224,14 @@ protected Builder newBuilderForType( * Protobuf type {@code CacheTag} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:CacheTag) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTagOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTagOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheTag_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheTag_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -32459,16 +28244,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); tableName_ = ""; @@ -32478,18 +28265,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheTag_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag result = buildPartial(); if (!result.isInitialized()) { @@ -32498,17 +28286,17 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag(this); int 
from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.tableName_ = tableName_; - if (((bitField0_ & 0x00000002) != 0)) { - partitionDesc_ = partitionDesc_.getUnmodifiableView(); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + partitionDesc_ = new com.google.protobuf.UnmodifiableLazyStringList( + partitionDesc_); bitField0_ = (bitField0_ & ~0x00000002); } result.partitionDesc_ = partitionDesc_; @@ -32517,39 +28305,6 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag)other); @@ -32576,17 +28331,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc } onChanged(); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -32596,7 +28348,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -32606,27 +28358,23 @@ public Builder mergeFrom( } private int bitField0_; + // optional string table_name = 1; private java.lang.Object tableName_ = ""; /** * optional string table_name = 1; - * @return Whether the tableName field is set. */ public boolean hasTableName() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string table_name = 1; - * @return The tableName. 
*/ public java.lang.String getTableName() { java.lang.Object ref = tableName_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - tableName_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + tableName_ = s; return s; } else { return (java.lang.String) ref; @@ -32634,7 +28382,6 @@ public java.lang.String getTableName() { } /** * optional string table_name = 1; - * @return The bytes for tableName. */ public com.google.protobuf.ByteString getTableNameBytes() { @@ -32651,8 +28398,6 @@ public java.lang.String getTableName() { } /** * optional string table_name = 1; - * @param value The tableName to set. - * @return This builder for chaining. */ public Builder setTableName( java.lang.String value) { @@ -32666,7 +28411,6 @@ public Builder setTableName( } /** * optional string table_name = 1; - * @return This builder for chaining. */ public Builder clearTableName() { bitField0_ = (bitField0_ & ~0x00000001); @@ -32676,8 +28420,6 @@ public Builder clearTableName() { } /** * optional string table_name = 1; - * @param value The bytes for tableName to set. - * @return This builder for chaining. */ public Builder setTableNameBytes( com.google.protobuf.ByteString value) { @@ -32690,40 +28432,35 @@ public Builder setTableNameBytes( return this; } + // repeated string partition_desc = 2; private com.google.protobuf.LazyStringList partitionDesc_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensurePartitionDescIsMutable() { - if (!((bitField0_ & 0x00000002) != 0)) { + if (!((bitField0_ & 0x00000002) == 0x00000002)) { partitionDesc_ = new com.google.protobuf.LazyStringArrayList(partitionDesc_); bitField0_ |= 0x00000002; } } /** * repeated string partition_desc = 2; - * @return A list containing the partitionDesc. */ - public com.google.protobuf.ProtocolStringList + public java.util.List getPartitionDescList() { - return partitionDesc_.getUnmodifiableView(); + return java.util.Collections.unmodifiableList(partitionDesc_); } /** * repeated string partition_desc = 2; - * @return The count of partitionDesc. */ public int getPartitionDescCount() { return partitionDesc_.size(); } /** * repeated string partition_desc = 2; - * @param index The index of the element to return. - * @return The partitionDesc at the given index. */ public java.lang.String getPartitionDesc(int index) { return partitionDesc_.get(index); } /** * repeated string partition_desc = 2; - * @param index The index of the value to return. - * @return The bytes of the partitionDesc at the given index. */ public com.google.protobuf.ByteString getPartitionDescBytes(int index) { @@ -32731,9 +28468,6 @@ public java.lang.String getPartitionDesc(int index) { } /** * repeated string partition_desc = 2; - * @param index The index to set the value at. - * @param value The partitionDesc to set. - * @return This builder for chaining. */ public Builder setPartitionDesc( int index, java.lang.String value) { @@ -32747,8 +28481,6 @@ public Builder setPartitionDesc( } /** * repeated string partition_desc = 2; - * @param value The partitionDesc to add. - * @return This builder for chaining. */ public Builder addPartitionDesc( java.lang.String value) { @@ -32762,20 +28494,16 @@ public Builder addPartitionDesc( } /** * repeated string partition_desc = 2; - * @param values The partitionDesc to add. - * @return This builder for chaining. 
*/ public Builder addAllPartitionDesc( java.lang.Iterable values) { ensurePartitionDescIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, partitionDesc_); + super.addAll(values, partitionDesc_); onChanged(); return this; } /** * repeated string partition_desc = 2; - * @return This builder for chaining. */ public Builder clearPartitionDesc() { partitionDesc_ = com.google.protobuf.LazyStringArrayList.EMPTY; @@ -32785,8 +28513,6 @@ public Builder clearPartitionDesc() { } /** * repeated string partition_desc = 2; - * @param value The bytes of the partitionDesc to add. - * @return This builder for chaining. */ public Builder addPartitionDescBytes( com.google.protobuf.ByteString value) { @@ -32798,82 +28524,38 @@ public Builder addPartitionDescBytes( onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:CacheTag) } - // @@protoc_insertion_point(class_scope:CacheTag) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public CacheTag parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new CacheTag(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheTag getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new CacheTag(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:CacheTag) } - public interface CacheEntryRangeOrBuilder extends - // @@protoc_insertion_point(interface_extends:CacheEntryRange) - com.google.protobuf.MessageOrBuilder { + public interface CacheEntryRangeOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional int64 start = 1; /** * optional int64 start = 1; - * @return Whether the start field is set. */ boolean hasStart(); /** * optional int64 start = 1; - * @return The start. */ long getStart(); + // optional int64 end = 2; /** * optional int64 end = 2; - * @return Whether the end field is set. */ boolean hasEnd(); /** * optional int64 end = 2; - * @return The end. 
*/ long getEnd(); } @@ -32881,37 +28563,35 @@ public interface CacheEntryRangeOrBuilder extends * Protobuf type {@code CacheEntryRange} */ public static final class CacheEntryRange extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:CacheEntryRange) - CacheEntryRangeOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements CacheEntryRangeOrBuilder { // Use CacheEntryRange.newBuilder() to construct. - private CacheEntryRange(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private CacheEntryRange(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private CacheEntryRange() { + private CacheEntryRange(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final CacheEntryRange defaultInstance; + public static CacheEntryRange getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new CacheEntryRange(); + public CacheEntryRange getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private CacheEntryRange( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -32923,6 +28603,13 @@ private CacheEntryRange( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 8: { bitField0_ |= 0x00000001; start_ = input.readInt64(); @@ -32933,22 +28620,13 @@ private CacheEntryRange( end_ = input.readInt64(); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -32959,95 +28637,112 @@ private CacheEntryRange( return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheEntryRange_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheEntryRange_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange.class, 
org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public CacheEntryRange parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new CacheEntryRange(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional int64 start = 1; public static final int START_FIELD_NUMBER = 1; private long start_; /** * optional int64 start = 1; - * @return Whether the start field is set. */ - @java.lang.Override public boolean hasStart() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional int64 start = 1; - * @return The start. */ - @java.lang.Override public long getStart() { return start_; } + // optional int64 end = 2; public static final int END_FIELD_NUMBER = 2; private long end_; /** * optional int64 end = 2; - * @return Whether the end field is set. */ - @java.lang.Override public boolean hasEnd() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional int64 end = 2; - * @return The end. */ - @java.lang.Override public long getEnd() { return end_; } + private void initFields() { + start_ = 0L; + end_ = 0L; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt64(1, start_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeInt64(2, end_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(1, start_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(2, end_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -33058,53 +28753,43 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange other = 
(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange) obj; - if (hasStart() != other.hasStart()) return false; + boolean result = true; + result = result && (hasStart() == other.hasStart()); if (hasStart()) { - if (getStart() - != other.getStart()) return false; + result = result && (getStart() + == other.getStart()); } - if (hasEnd() != other.hasEnd()) return false; + result = result && (hasEnd() == other.hasEnd()); if (hasEnd()) { - if (getEnd() - != other.getEnd()) return false; + result = result && (getEnd() + == other.getEnd()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasStart()) { hash = (37 * hash) + START_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getStart()); + hash = (53 * hash) + hashLong(getStart()); } if (hasEnd()) { hash = (37 * hash) + END_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getEnd()); + hash = (53 * hash) + hashLong(getEnd()); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -33128,59 +28813,46 @@ public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.Ca } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -33188,16 +28860,14 @@ protected Builder newBuilderForType( * Protobuf type {@code CacheEntryRange} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:CacheEntryRange) - org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRangeOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRangeOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheEntryRange_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheEntryRange_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -33210,16 +28880,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); start_ = 0L; @@ -33229,18 +28901,19 @@ public Builder clear() { return this; } - @java.lang.Override + public 
Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.internal_static_CacheEntryRange_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange build() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange result = buildPartial(); if (!result.isInitialized()) { @@ -33249,57 +28922,23 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntr return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange buildPartial() { org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange result = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { - result.start_ = start_; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (((from_bitField0_ & 0x00000002) != 0)) { - result.end_ = end_; + result.start_ = start_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } + result.end_ = end_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange) { return mergeFrom((org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange)other); @@ -33317,17 +28956,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtoc if (other.hasEnd()) { setEnd(other.getEnd()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -33337,7 +28973,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, 
extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -33347,27 +28983,22 @@ public Builder mergeFrom( } private int bitField0_; + // optional int64 start = 1; private long start_ ; /** * optional int64 start = 1; - * @return Whether the start field is set. */ - @java.lang.Override public boolean hasStart() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional int64 start = 1; - * @return The start. */ - @java.lang.Override public long getStart() { return start_; } /** * optional int64 start = 1; - * @param value The start to set. - * @return This builder for chaining. */ public Builder setStart(long value) { bitField0_ |= 0x00000001; @@ -33377,7 +29008,6 @@ public Builder setStart(long value) { } /** * optional int64 start = 1; - * @return This builder for chaining. */ public Builder clearStart() { bitField0_ = (bitField0_ & ~0x00000001); @@ -33386,27 +29016,22 @@ public Builder clearStart() { return this; } + // optional int64 end = 2; private long end_ ; /** * optional int64 end = 2; - * @return Whether the end field is set. */ - @java.lang.Override public boolean hasEnd() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional int64 end = 2; - * @return The end. */ - @java.lang.Override public long getEnd() { return end_; } /** * optional int64 end = 2; - * @param value The end to set. - * @return This builder for chaining. */ public Builder setEnd(long value) { bitField0_ |= 0x00000002; @@ -33416,7 +29041,6 @@ public Builder setEnd(long value) { } /** * optional int64 end = 2; - * @return This builder for chaining. 
*/ public Builder clearEnd() { bitField0_ = (bitField0_ & ~0x00000002); @@ -33424,57 +29048,16 @@ public Builder clearEnd() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:CacheEntryRange) } - // @@protoc_insertion_point(class_scope:CacheEntryRange) - private static final org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange(); - } - - public static org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public CacheEntryRange parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new CacheEntryRange(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.CacheEntryRange getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new CacheEntryRange(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:CacheEntryRange) } /** @@ -34651,212 +30234,212 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheC // @@protoc_insertion_point(class_scope:LlapManagementProtocol) } - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_UserPayloadProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_UserPayloadProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_EntityDescriptorProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_EntityDescriptorProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_IOSpecProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_IOSpecProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_GroupInputSpecProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_GroupInputSpecProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_SignableVertexSpec_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_SignableVertexSpec_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_VertexOrBinary_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_VertexOrBinary_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_FragmentRuntimeInfo_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_FragmentRuntimeInfo_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_QueryIdentifierProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_QueryIdentifierProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_NotTezEvent_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_NotTezEvent_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_SubmitWorkRequestProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_SubmitWorkRequestProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_RegisterDagRequestProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_RegisterDagRequestProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_RegisterDagResponseProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_RegisterDagResponseProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_SubmitWorkResponseProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable 
internal_static_SubmitWorkResponseProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_SourceStateUpdatedRequestProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_SourceStateUpdatedRequestProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_SourceStateUpdatedResponseProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_SourceStateUpdatedResponseProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_QueryCompleteRequestProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_QueryCompleteRequestProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_QueryCompleteResponseProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_QueryCompleteResponseProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_TerminateFragmentRequestProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_TerminateFragmentRequestProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_TerminateFragmentResponseProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_TerminateFragmentResponseProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_UpdateFragmentRequestProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_UpdateFragmentRequestProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_UpdateFragmentResponseProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_UpdateFragmentResponseProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_GetTokenRequestProto_descriptor; - private static final - 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_GetTokenRequestProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_GetTokenResponseProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_GetTokenResponseProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_LlapOutputSocketInitMessage_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_LlapOutputSocketInitMessage_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_PurgeCacheRequestProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_PurgeCacheRequestProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_PurgeCacheResponseProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_PurgeCacheResponseProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_MapEntry_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_MapEntry_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_GetDaemonMetricsRequestProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_GetDaemonMetricsRequestProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_GetDaemonMetricsResponseProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_GetDaemonMetricsResponseProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_SetCapacityRequestProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_SetCapacityRequestProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_SetCapacityResponseProto_descriptor; - private static final 
- com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_SetCapacityResponseProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_EvictEntityRequestProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_EvictEntityRequestProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_TableProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_TableProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_EvictEntityResponseProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_EvictEntityResponseProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_GetCacheContentRequestProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_GetCacheContentRequestProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_GetCacheContentResponseProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_GetCacheContentResponseProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_CacheEntryList_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_CacheEntryList_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_CacheEntry_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_CacheEntry_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_CacheTag_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_CacheTag_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_CacheEntryRange_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static 
+ com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_CacheEntryRange_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; } - private static com.google.protobuf.Descriptors.FileDescriptor + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; static { java.lang.String[] descriptorData = { @@ -34869,7 +30452,7 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheC "-\n\rio_descriptor\030\002 \001(\0132\026.EntityDescripto" + "rProto\022\033\n\023physical_edge_count\030\003 \001(\005\"z\n\023G" + "roupInputSpecProto\022\022\n\ngroup_name\030\001 \001(\t\022\026" + - "\n\016group_vertices\030\002 \003(\t\0227\n\027merged_input_d" + + "\n\016group_vertices\030\002 \003(\t\0227\n\027merged_input_d", "escriptor\030\003 \001(\0132\026.EntityDescriptorProto\"" + "\314\003\n\022SignableVertexSpec\022\014\n\004user\030\001 \001(\t\022\026\n\016" + "signatureKeyId\030\002 \001(\003\022/\n\020query_identifier" + @@ -34879,7 +30462,7 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheC "en_identifier\030\010 \001(\t\0224\n\024processor_descrip" + "tor\030\t \001(\0132\026.EntityDescriptorProto\022!\n\013inp" + "ut_specs\030\n \003(\0132\014.IOSpecProto\022\"\n\014output_s" + - "pecs\030\013 \003(\0132\014.IOSpecProto\0221\n\023grouped_inpu" + + "pecs\030\013 \003(\0132\014.IOSpecProto\0221\n\023grouped_inpu", "t_specs\030\014 \003(\0132\024.GroupInputSpecProto\022\032\n\022v" + "ertex_parallelism\030\r \001(\005\022%\n\026is_external_s" + "ubmission\030\016 \001(\010:\005false\"K\n\016VertexOrBinary" + @@ -34889,7 +30472,7 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheC "\022-\n%num_self_and_upstream_completed_task" + "s\030\002 \001(\005\022\033\n\023within_dag_priority\030\003 \001(\005\022\026\n\016" + "dag_start_time\030\004 \001(\003\022 \n\030first_attempt_st" + - "art_time\030\005 \001(\003\022\"\n\032current_attempt_start_" + + "art_time\030\005 \001(\003\022\"\n\032current_attempt_start_", "time\030\006 \001(\003\"d\n\024QueryIdentifierProto\022\035\n\025ap" + "plication_id_string\030\001 \001(\t\022\021\n\tdag_index\030\002" + " \001(\005\022\032\n\022app_attempt_number\030\003 \001(\005\"l\n\013NotT" + @@ -34899,7 +30482,7 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheC "uestProto\022\"\n\twork_spec\030\001 \001(\0132\017.VertexOrB" + "inary\022\033\n\023work_spec_signature\030\002 \001(\014\022\027\n\017fr" + "agment_number\030\003 \001(\005\022\026\n\016attempt_number\030\004 " + - "\001(\005\022\033\n\023container_id_string\030\005 \001(\t\022\017\n\007am_h" + + "\001(\005\022\033\n\023container_id_string\030\005 \001(\t\022\017\n\007am_h", "ost\030\006 \001(\t\022\017\n\007am_port\030\007 \001(\005\022\032\n\022credential" + "s_binary\030\010 \001(\014\0223\n\025fragment_runtime_info\030" + "\t \001(\0132\024.FragmentRuntimeInfo\022\033\n\023initial_e" + @@ -34909,7 +30492,7 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheC "st\030\016 \001(\010:\005false\"t\n\027RegisterDagRequestPro" + "to\022\014\n\004user\030\001 \001(\t\022/\n\020query_identifier\030\002 \002" + "(\0132\025.QueryIdentifierProto\022\032\n\022credentials" + - "_binary\030\003 \001(\014\"\032\n\030RegisterDagResponseProt" + + "_binary\030\003 \001(\014\"\032\n\030RegisterDagResponseProt", "o\"b\n\027SubmitWorkResponseProto\022/\n\020submissi" + 
"on_state\030\001 \001(\0162\025.SubmissionStateProto\022\026\n" + "\016unique_node_id\030\002 \001(\t\"\205\001\n\036SourceStateUpd" + @@ -34919,7 +30502,7 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheC "\037SourceStateUpdatedResponseProto\"e\n\031Quer" + "yCompleteRequestProto\022/\n\020query_identifie" + "r\030\001 \001(\0132\025.QueryIdentifierProto\022\027\n\014delete" + - "_delay\030\002 \001(\003:\0010\"\034\n\032QueryCompleteResponse" + + "_delay\030\002 \001(\003:\0010\"\034\n\032QueryCompleteResponse", "Proto\"t\n\035TerminateFragmentRequestProto\022/" + "\n\020query_identifier\030\001 \001(\0132\025.QueryIdentifi" + "erProto\022\"\n\032fragment_identifier_string\030\002 " + @@ -34929,7 +30512,7 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheC "\032fragment_identifier_string\030\002 \001(\t\022\025\n\ris_" + "guaranteed\030\003 \001(\010\"D\n\033UpdateFragmentRespon" + "seProto\022\016\n\006result\030\001 \001(\010\022\025\n\ris_guaranteed" + - "\030\002 \001(\010\"&\n\024GetTokenRequestProto\022\016\n\006app_id" + + "\030\002 \001(\010\"&\n\024GetTokenRequestProto\022\016\n\006app_id", "\030\001 \001(\t\"&\n\025GetTokenResponseProto\022\r\n\005token" + "\030\001 \001(\014\"A\n\033LlapOutputSocketInitMessage\022\023\n" + "\013fragment_id\030\001 \002(\t\022\r\n\005token\030\002 \001(\014\"\030\n\026Pur" + @@ -34939,7 +30522,7 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheC "etDaemonMetricsRequestProto\";\n\035GetDaemon" + "MetricsResponseProto\022\032\n\007metrics\030\001 \003(\0132\t." + "MapEntry\"A\n\027SetCapacityRequestProto\022\023\n\013e" + - "xecutorNum\030\001 \001(\005\022\021\n\tqueueSize\030\002 \001(\005\"\032\n\030S" + + "xecutorNum\030\001 \001(\005\022\021\n\tqueueSize\030\002 \001(\005\"\032\n\030S", "etCapacityResponseProto\"F\n\027EvictEntityRe" + "questProto\022\017\n\007db_name\030\001 \002(\t\022\032\n\005table\030\002 \003" + "(\0132\013.TableProto\"D\n\nTableProto\022\022\n\ntable_n" + @@ -34949,7 +30532,7 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheC "stProto\"?\n\034GetCacheContentResponseProto\022" + "\037\n\006result\030\001 \001(\0132\017.CacheEntryList\".\n\016Cach" + "eEntryList\022\034\n\007entries\030\001 \003(\0132\013.CacheEntry" + - "\"q\n\nCacheEntry\022\020\n\010file_key\030\001 \001(\014\022\021\n\tfile" + + "\"q\n\nCacheEntry\022\020\n\010file_key\030\001 \001(\014\022\021\n\tfile", "_path\030\002 \001(\t\022\034\n\tcache_tag\030\003 \001(\0132\t.CacheTa" + "g\022 \n\006ranges\030\004 \003(\0132\020.CacheEntryRange\"6\n\010C" + "acheTag\022\022\n\ntable_name\030\001 \001(\t\022\026\n\016partition" + @@ -34959,7 +30542,7 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheC "sionStateProto\022\014\n\010ACCEPTED\020\001\022\014\n\010REJECTED" + "\020\002\022\021\n\rEVICTED_OTHER\020\0032\337\003\n\022LlapDaemonProt" + "ocol\022B\n\013registerDag\022\030.RegisterDagRequest" + - "Proto\032\031.RegisterDagResponseProto\022?\n\nsubm" + + "Proto\032\031.RegisterDagResponseProto\022?\n\nsubm", "itWork\022\027.SubmitWorkRequestProto\032\030.Submit" + "WorkResponseProto\022W\n\022sourceStateUpdated\022" + "\037.SourceStateUpdatedRequestProto\032 .Sourc" + @@ -34969,7 +30552,7 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheC "t\022\036.TerminateFragmentRequestProto\032\037.Term" + "inateFragmentResponseProto\022K\n\016updateFrag" + 
"ment\022\033.UpdateFragmentRequestProto\032\034.Upda" + - "teFragmentResponseProto2\311\003\n\026LlapManageme" + + "teFragmentResponseProto2\311\003\n\026LlapManageme", "ntProtocol\022C\n\022getDelegationToken\022\025.GetTo" + "kenRequestProto\032\026.GetTokenResponseProto\022" + "?\n\npurgeCache\022\027.PurgeCacheRequestProto\032\030" + @@ -34979,255 +30562,263 @@ public org.apache.hadoop.hive.llap.daemon.rpc.LlapDaemonProtocolProtos.GetCacheC "ty\022\030.SetCapacityRequestProto\032\031.SetCapaci" + "tyResponseProto\022B\n\013evictEntity\022\030.EvictEn" + "tityRequestProto\032\031.EvictEntityResponsePr" + - "oto\022N\n\017getCacheContent\022\034.GetCacheContent" + + "oto\022N\n\017getCacheContent\022\034.GetCacheContent", "RequestProto\032\035.GetCacheContentResponsePr" + "otoBH\n&org.apache.hadoop.hive.llap.daemo" + "n.rpcB\030LlapDaemonProtocolProtos\210\001\001\240\001\001" }; - descriptor = com.google.protobuf.Descriptors.FileDescriptor + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_UserPayloadProto_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_UserPayloadProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_UserPayloadProto_descriptor, + new java.lang.String[] { "UserPayload", "Version", }); + internal_static_EntityDescriptorProto_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_EntityDescriptorProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_EntityDescriptorProto_descriptor, + new java.lang.String[] { "ClassName", "UserPayload", "HistoryText", }); + internal_static_IOSpecProto_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_IOSpecProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_IOSpecProto_descriptor, + new java.lang.String[] { "ConnectedVertexName", "IoDescriptor", "PhysicalEdgeCount", }); + internal_static_GroupInputSpecProto_descriptor = + getDescriptor().getMessageTypes().get(3); + internal_static_GroupInputSpecProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GroupInputSpecProto_descriptor, + new java.lang.String[] { "GroupName", "GroupVertices", "MergedInputDescriptor", }); + internal_static_SignableVertexSpec_descriptor = + getDescriptor().getMessageTypes().get(4); + internal_static_SignableVertexSpec_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SignableVertexSpec_descriptor, + new java.lang.String[] { "User", "SignatureKeyId", "QueryIdentifier", "HiveQueryId", "DagName", "VertexName", "VertexIndex", "TokenIdentifier", "ProcessorDescriptor", "InputSpecs", "OutputSpecs", "GroupedInputSpecs", "VertexParallelism", "IsExternalSubmission", }); + internal_static_VertexOrBinary_descriptor = + getDescriptor().getMessageTypes().get(5); + internal_static_VertexOrBinary_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_VertexOrBinary_descriptor, + new java.lang.String[] { "Vertex", "VertexBinary", }); + internal_static_FragmentRuntimeInfo_descriptor = + 
getDescriptor().getMessageTypes().get(6); + internal_static_FragmentRuntimeInfo_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_FragmentRuntimeInfo_descriptor, + new java.lang.String[] { "NumSelfAndUpstreamTasks", "NumSelfAndUpstreamCompletedTasks", "WithinDagPriority", "DagStartTime", "FirstAttemptStartTime", "CurrentAttemptStartTime", }); + internal_static_QueryIdentifierProto_descriptor = + getDescriptor().getMessageTypes().get(7); + internal_static_QueryIdentifierProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_QueryIdentifierProto_descriptor, + new java.lang.String[] { "ApplicationIdString", "DagIndex", "AppAttemptNumber", }); + internal_static_NotTezEvent_descriptor = + getDescriptor().getMessageTypes().get(8); + internal_static_NotTezEvent_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_NotTezEvent_descriptor, + new java.lang.String[] { "InputEventProtoBytes", "VertexName", "DestInputName", "KeyId", }); + internal_static_SubmitWorkRequestProto_descriptor = + getDescriptor().getMessageTypes().get(9); + internal_static_SubmitWorkRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SubmitWorkRequestProto_descriptor, + new java.lang.String[] { "WorkSpec", "WorkSpecSignature", "FragmentNumber", "AttemptNumber", "ContainerIdString", "AmHost", "AmPort", "CredentialsBinary", "FragmentRuntimeInfo", "InitialEventBytes", "InitialEventSignature", "IsGuaranteed", "Jwt", "IsExternalClientRequest", }); + internal_static_RegisterDagRequestProto_descriptor = + getDescriptor().getMessageTypes().get(10); + internal_static_RegisterDagRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegisterDagRequestProto_descriptor, + new java.lang.String[] { "User", "QueryIdentifier", "CredentialsBinary", }); + internal_static_RegisterDagResponseProto_descriptor = + getDescriptor().getMessageTypes().get(11); + internal_static_RegisterDagResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_RegisterDagResponseProto_descriptor, + new java.lang.String[] { }); + internal_static_SubmitWorkResponseProto_descriptor = + getDescriptor().getMessageTypes().get(12); + internal_static_SubmitWorkResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SubmitWorkResponseProto_descriptor, + new java.lang.String[] { "SubmissionState", "UniqueNodeId", }); + internal_static_SourceStateUpdatedRequestProto_descriptor = + getDescriptor().getMessageTypes().get(13); + internal_static_SourceStateUpdatedRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SourceStateUpdatedRequestProto_descriptor, + new java.lang.String[] { "QueryIdentifier", "SrcName", "State", }); + internal_static_SourceStateUpdatedResponseProto_descriptor = + getDescriptor().getMessageTypes().get(14); + internal_static_SourceStateUpdatedResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SourceStateUpdatedResponseProto_descriptor, + new java.lang.String[] { }); + internal_static_QueryCompleteRequestProto_descriptor = + getDescriptor().getMessageTypes().get(15); + internal_static_QueryCompleteRequestProto_fieldAccessorTable = new + 
com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_QueryCompleteRequestProto_descriptor, + new java.lang.String[] { "QueryIdentifier", "DeleteDelay", }); + internal_static_QueryCompleteResponseProto_descriptor = + getDescriptor().getMessageTypes().get(16); + internal_static_QueryCompleteResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_QueryCompleteResponseProto_descriptor, + new java.lang.String[] { }); + internal_static_TerminateFragmentRequestProto_descriptor = + getDescriptor().getMessageTypes().get(17); + internal_static_TerminateFragmentRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_TerminateFragmentRequestProto_descriptor, + new java.lang.String[] { "QueryIdentifier", "FragmentIdentifierString", }); + internal_static_TerminateFragmentResponseProto_descriptor = + getDescriptor().getMessageTypes().get(18); + internal_static_TerminateFragmentResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_TerminateFragmentResponseProto_descriptor, + new java.lang.String[] { }); + internal_static_UpdateFragmentRequestProto_descriptor = + getDescriptor().getMessageTypes().get(19); + internal_static_UpdateFragmentRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_UpdateFragmentRequestProto_descriptor, + new java.lang.String[] { "QueryIdentifier", "FragmentIdentifierString", "IsGuaranteed", }); + internal_static_UpdateFragmentResponseProto_descriptor = + getDescriptor().getMessageTypes().get(20); + internal_static_UpdateFragmentResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_UpdateFragmentResponseProto_descriptor, + new java.lang.String[] { "Result", "IsGuaranteed", }); + internal_static_GetTokenRequestProto_descriptor = + getDescriptor().getMessageTypes().get(21); + internal_static_GetTokenRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetTokenRequestProto_descriptor, + new java.lang.String[] { "AppId", }); + internal_static_GetTokenResponseProto_descriptor = + getDescriptor().getMessageTypes().get(22); + internal_static_GetTokenResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetTokenResponseProto_descriptor, + new java.lang.String[] { "Token", }); + internal_static_LlapOutputSocketInitMessage_descriptor = + getDescriptor().getMessageTypes().get(23); + internal_static_LlapOutputSocketInitMessage_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_LlapOutputSocketInitMessage_descriptor, + new java.lang.String[] { "FragmentId", "Token", }); + internal_static_PurgeCacheRequestProto_descriptor = + getDescriptor().getMessageTypes().get(24); + internal_static_PurgeCacheRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_PurgeCacheRequestProto_descriptor, + new java.lang.String[] { }); + internal_static_PurgeCacheResponseProto_descriptor = + getDescriptor().getMessageTypes().get(25); + internal_static_PurgeCacheResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_PurgeCacheResponseProto_descriptor, + new java.lang.String[] { "PurgedMemoryBytes", }); + 
internal_static_MapEntry_descriptor = + getDescriptor().getMessageTypes().get(26); + internal_static_MapEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MapEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_GetDaemonMetricsRequestProto_descriptor = + getDescriptor().getMessageTypes().get(27); + internal_static_GetDaemonMetricsRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetDaemonMetricsRequestProto_descriptor, + new java.lang.String[] { }); + internal_static_GetDaemonMetricsResponseProto_descriptor = + getDescriptor().getMessageTypes().get(28); + internal_static_GetDaemonMetricsResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetDaemonMetricsResponseProto_descriptor, + new java.lang.String[] { "Metrics", }); + internal_static_SetCapacityRequestProto_descriptor = + getDescriptor().getMessageTypes().get(29); + internal_static_SetCapacityRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SetCapacityRequestProto_descriptor, + new java.lang.String[] { "ExecutorNum", "QueueSize", }); + internal_static_SetCapacityResponseProto_descriptor = + getDescriptor().getMessageTypes().get(30); + internal_static_SetCapacityResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_SetCapacityResponseProto_descriptor, + new java.lang.String[] { }); + internal_static_EvictEntityRequestProto_descriptor = + getDescriptor().getMessageTypes().get(31); + internal_static_EvictEntityRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_EvictEntityRequestProto_descriptor, + new java.lang.String[] { "DbName", "Table", }); + internal_static_TableProto_descriptor = + getDescriptor().getMessageTypes().get(32); + internal_static_TableProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_TableProto_descriptor, + new java.lang.String[] { "TableName", "PartKey", "PartVal", }); + internal_static_EvictEntityResponseProto_descriptor = + getDescriptor().getMessageTypes().get(33); + internal_static_EvictEntityResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_EvictEntityResponseProto_descriptor, + new java.lang.String[] { "EvictedBytes", }); + internal_static_GetCacheContentRequestProto_descriptor = + getDescriptor().getMessageTypes().get(34); + internal_static_GetCacheContentRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetCacheContentRequestProto_descriptor, + new java.lang.String[] { }); + internal_static_GetCacheContentResponseProto_descriptor = + getDescriptor().getMessageTypes().get(35); + internal_static_GetCacheContentResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_GetCacheContentResponseProto_descriptor, + new java.lang.String[] { "Result", }); + internal_static_CacheEntryList_descriptor = + getDescriptor().getMessageTypes().get(36); + internal_static_CacheEntryList_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_CacheEntryList_descriptor, + new java.lang.String[] { "Entries", }); + 
internal_static_CacheEntry_descriptor = + getDescriptor().getMessageTypes().get(37); + internal_static_CacheEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_CacheEntry_descriptor, + new java.lang.String[] { "FileKey", "FilePath", "CacheTag", "Ranges", }); + internal_static_CacheTag_descriptor = + getDescriptor().getMessageTypes().get(38); + internal_static_CacheTag_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_CacheTag_descriptor, + new java.lang.String[] { "TableName", "PartitionDesc", }); + internal_static_CacheEntryRange_descriptor = + getDescriptor().getMessageTypes().get(39); + internal_static_CacheEntryRange_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_CacheEntryRange_descriptor, + new java.lang.String[] { "Start", "End", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { - }); - internal_static_UserPayloadProto_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_UserPayloadProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_UserPayloadProto_descriptor, - new java.lang.String[] { "UserPayload", "Version", }); - internal_static_EntityDescriptorProto_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_EntityDescriptorProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_EntityDescriptorProto_descriptor, - new java.lang.String[] { "ClassName", "UserPayload", "HistoryText", }); - internal_static_IOSpecProto_descriptor = - getDescriptor().getMessageTypes().get(2); - internal_static_IOSpecProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_IOSpecProto_descriptor, - new java.lang.String[] { "ConnectedVertexName", "IoDescriptor", "PhysicalEdgeCount", }); - internal_static_GroupInputSpecProto_descriptor = - getDescriptor().getMessageTypes().get(3); - internal_static_GroupInputSpecProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_GroupInputSpecProto_descriptor, - new java.lang.String[] { "GroupName", "GroupVertices", "MergedInputDescriptor", }); - internal_static_SignableVertexSpec_descriptor = - getDescriptor().getMessageTypes().get(4); - internal_static_SignableVertexSpec_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_SignableVertexSpec_descriptor, - new java.lang.String[] { "User", "SignatureKeyId", "QueryIdentifier", "HiveQueryId", "DagName", "VertexName", "VertexIndex", "TokenIdentifier", "ProcessorDescriptor", "InputSpecs", "OutputSpecs", "GroupedInputSpecs", "VertexParallelism", "IsExternalSubmission", }); - internal_static_VertexOrBinary_descriptor = - getDescriptor().getMessageTypes().get(5); - internal_static_VertexOrBinary_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_VertexOrBinary_descriptor, - new java.lang.String[] { "Vertex", "VertexBinary", }); - internal_static_FragmentRuntimeInfo_descriptor = - getDescriptor().getMessageTypes().get(6); - internal_static_FragmentRuntimeInfo_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - 
internal_static_FragmentRuntimeInfo_descriptor, - new java.lang.String[] { "NumSelfAndUpstreamTasks", "NumSelfAndUpstreamCompletedTasks", "WithinDagPriority", "DagStartTime", "FirstAttemptStartTime", "CurrentAttemptStartTime", }); - internal_static_QueryIdentifierProto_descriptor = - getDescriptor().getMessageTypes().get(7); - internal_static_QueryIdentifierProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_QueryIdentifierProto_descriptor, - new java.lang.String[] { "ApplicationIdString", "DagIndex", "AppAttemptNumber", }); - internal_static_NotTezEvent_descriptor = - getDescriptor().getMessageTypes().get(8); - internal_static_NotTezEvent_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_NotTezEvent_descriptor, - new java.lang.String[] { "InputEventProtoBytes", "VertexName", "DestInputName", "KeyId", }); - internal_static_SubmitWorkRequestProto_descriptor = - getDescriptor().getMessageTypes().get(9); - internal_static_SubmitWorkRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_SubmitWorkRequestProto_descriptor, - new java.lang.String[] { "WorkSpec", "WorkSpecSignature", "FragmentNumber", "AttemptNumber", "ContainerIdString", "AmHost", "AmPort", "CredentialsBinary", "FragmentRuntimeInfo", "InitialEventBytes", "InitialEventSignature", "IsGuaranteed", "Jwt", "IsExternalClientRequest", }); - internal_static_RegisterDagRequestProto_descriptor = - getDescriptor().getMessageTypes().get(10); - internal_static_RegisterDagRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_RegisterDagRequestProto_descriptor, - new java.lang.String[] { "User", "QueryIdentifier", "CredentialsBinary", }); - internal_static_RegisterDagResponseProto_descriptor = - getDescriptor().getMessageTypes().get(11); - internal_static_RegisterDagResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_RegisterDagResponseProto_descriptor, - new java.lang.String[] { }); - internal_static_SubmitWorkResponseProto_descriptor = - getDescriptor().getMessageTypes().get(12); - internal_static_SubmitWorkResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_SubmitWorkResponseProto_descriptor, - new java.lang.String[] { "SubmissionState", "UniqueNodeId", }); - internal_static_SourceStateUpdatedRequestProto_descriptor = - getDescriptor().getMessageTypes().get(13); - internal_static_SourceStateUpdatedRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_SourceStateUpdatedRequestProto_descriptor, - new java.lang.String[] { "QueryIdentifier", "SrcName", "State", }); - internal_static_SourceStateUpdatedResponseProto_descriptor = - getDescriptor().getMessageTypes().get(14); - internal_static_SourceStateUpdatedResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_SourceStateUpdatedResponseProto_descriptor, - new java.lang.String[] { }); - internal_static_QueryCompleteRequestProto_descriptor = - getDescriptor().getMessageTypes().get(15); - internal_static_QueryCompleteRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_QueryCompleteRequestProto_descriptor, - new java.lang.String[] { 
"QueryIdentifier", "DeleteDelay", }); - internal_static_QueryCompleteResponseProto_descriptor = - getDescriptor().getMessageTypes().get(16); - internal_static_QueryCompleteResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_QueryCompleteResponseProto_descriptor, - new java.lang.String[] { }); - internal_static_TerminateFragmentRequestProto_descriptor = - getDescriptor().getMessageTypes().get(17); - internal_static_TerminateFragmentRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_TerminateFragmentRequestProto_descriptor, - new java.lang.String[] { "QueryIdentifier", "FragmentIdentifierString", }); - internal_static_TerminateFragmentResponseProto_descriptor = - getDescriptor().getMessageTypes().get(18); - internal_static_TerminateFragmentResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_TerminateFragmentResponseProto_descriptor, - new java.lang.String[] { }); - internal_static_UpdateFragmentRequestProto_descriptor = - getDescriptor().getMessageTypes().get(19); - internal_static_UpdateFragmentRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_UpdateFragmentRequestProto_descriptor, - new java.lang.String[] { "QueryIdentifier", "FragmentIdentifierString", "IsGuaranteed", }); - internal_static_UpdateFragmentResponseProto_descriptor = - getDescriptor().getMessageTypes().get(20); - internal_static_UpdateFragmentResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_UpdateFragmentResponseProto_descriptor, - new java.lang.String[] { "Result", "IsGuaranteed", }); - internal_static_GetTokenRequestProto_descriptor = - getDescriptor().getMessageTypes().get(21); - internal_static_GetTokenRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_GetTokenRequestProto_descriptor, - new java.lang.String[] { "AppId", }); - internal_static_GetTokenResponseProto_descriptor = - getDescriptor().getMessageTypes().get(22); - internal_static_GetTokenResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_GetTokenResponseProto_descriptor, - new java.lang.String[] { "Token", }); - internal_static_LlapOutputSocketInitMessage_descriptor = - getDescriptor().getMessageTypes().get(23); - internal_static_LlapOutputSocketInitMessage_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_LlapOutputSocketInitMessage_descriptor, - new java.lang.String[] { "FragmentId", "Token", }); - internal_static_PurgeCacheRequestProto_descriptor = - getDescriptor().getMessageTypes().get(24); - internal_static_PurgeCacheRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_PurgeCacheRequestProto_descriptor, - new java.lang.String[] { }); - internal_static_PurgeCacheResponseProto_descriptor = - getDescriptor().getMessageTypes().get(25); - internal_static_PurgeCacheResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_PurgeCacheResponseProto_descriptor, - new java.lang.String[] { "PurgedMemoryBytes", }); - internal_static_MapEntry_descriptor = - getDescriptor().getMessageTypes().get(26); - 
internal_static_MapEntry_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_MapEntry_descriptor, - new java.lang.String[] { "Key", "Value", }); - internal_static_GetDaemonMetricsRequestProto_descriptor = - getDescriptor().getMessageTypes().get(27); - internal_static_GetDaemonMetricsRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_GetDaemonMetricsRequestProto_descriptor, - new java.lang.String[] { }); - internal_static_GetDaemonMetricsResponseProto_descriptor = - getDescriptor().getMessageTypes().get(28); - internal_static_GetDaemonMetricsResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_GetDaemonMetricsResponseProto_descriptor, - new java.lang.String[] { "Metrics", }); - internal_static_SetCapacityRequestProto_descriptor = - getDescriptor().getMessageTypes().get(29); - internal_static_SetCapacityRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_SetCapacityRequestProto_descriptor, - new java.lang.String[] { "ExecutorNum", "QueueSize", }); - internal_static_SetCapacityResponseProto_descriptor = - getDescriptor().getMessageTypes().get(30); - internal_static_SetCapacityResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_SetCapacityResponseProto_descriptor, - new java.lang.String[] { }); - internal_static_EvictEntityRequestProto_descriptor = - getDescriptor().getMessageTypes().get(31); - internal_static_EvictEntityRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_EvictEntityRequestProto_descriptor, - new java.lang.String[] { "DbName", "Table", }); - internal_static_TableProto_descriptor = - getDescriptor().getMessageTypes().get(32); - internal_static_TableProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_TableProto_descriptor, - new java.lang.String[] { "TableName", "PartKey", "PartVal", }); - internal_static_EvictEntityResponseProto_descriptor = - getDescriptor().getMessageTypes().get(33); - internal_static_EvictEntityResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_EvictEntityResponseProto_descriptor, - new java.lang.String[] { "EvictedBytes", }); - internal_static_GetCacheContentRequestProto_descriptor = - getDescriptor().getMessageTypes().get(34); - internal_static_GetCacheContentRequestProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_GetCacheContentRequestProto_descriptor, - new java.lang.String[] { }); - internal_static_GetCacheContentResponseProto_descriptor = - getDescriptor().getMessageTypes().get(35); - internal_static_GetCacheContentResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_GetCacheContentResponseProto_descriptor, - new java.lang.String[] { "Result", }); - internal_static_CacheEntryList_descriptor = - getDescriptor().getMessageTypes().get(36); - internal_static_CacheEntryList_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_CacheEntryList_descriptor, - new java.lang.String[] { "Entries", }); - internal_static_CacheEntry_descriptor = - getDescriptor().getMessageTypes().get(37); - 
internal_static_CacheEntry_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_CacheEntry_descriptor, - new java.lang.String[] { "FileKey", "FilePath", "CacheTag", "Ranges", }); - internal_static_CacheTag_descriptor = - getDescriptor().getMessageTypes().get(38); - internal_static_CacheTag_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_CacheTag_descriptor, - new java.lang.String[] { "TableName", "PartitionDesc", }); - internal_static_CacheEntryRange_descriptor = - getDescriptor().getMessageTypes().get(39); - internal_static_CacheEntryRange_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_CacheEntryRange_descriptor, - new java.lang.String[] { "Start", "End", }); + }, assigner); } // @@protoc_insertion_point(outer_class_scope) diff --git a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/plugin/rpc/LlapPluginProtocolProtos.java b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/plugin/rpc/LlapPluginProtocolProtos.java index adb65cf27c27..dbcd895f77a0 100644 --- a/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/plugin/rpc/LlapPluginProtocolProtos.java +++ b/llap-common/src/gen/protobuf/gen-java/org/apache/hadoop/hive/llap/plugin/rpc/LlapPluginProtocolProtos.java @@ -5,27 +5,19 @@ public final class LlapPluginProtocolProtos { private LlapPluginProtocolProtos() {} - public static void registerAllExtensions( - com.google.protobuf.ExtensionRegistryLite registry) { - } - public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { - registerAllExtensions( - (com.google.protobuf.ExtensionRegistryLite) registry); } - public interface UpdateQueryRequestProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:UpdateQueryRequestProto) - com.google.protobuf.MessageOrBuilder { + public interface UpdateQueryRequestProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional int32 guaranteed_task_count = 1; /** * optional int32 guaranteed_task_count = 1; - * @return Whether the guaranteedTaskCount field is set. */ boolean hasGuaranteedTaskCount(); /** * optional int32 guaranteed_task_count = 1; - * @return The guaranteedTaskCount. */ int getGuaranteedTaskCount(); } @@ -33,37 +25,35 @@ public interface UpdateQueryRequestProtoOrBuilder extends * Protobuf type {@code UpdateQueryRequestProto} */ public static final class UpdateQueryRequestProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:UpdateQueryRequestProto) - UpdateQueryRequestProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements UpdateQueryRequestProtoOrBuilder { // Use UpdateQueryRequestProto.newBuilder() to construct. 
- private UpdateQueryRequestProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private UpdateQueryRequestProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private UpdateQueryRequestProto() { + private UpdateQueryRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final UpdateQueryRequestProto defaultInstance; + public static UpdateQueryRequestProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new UpdateQueryRequestProto(); + public UpdateQueryRequestProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private UpdateQueryRequestProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -75,27 +65,25 @@ private UpdateQueryRequestProto( case 0: done = true; break; - case 8: { - bitField0_ |= 0x00000001; - guaranteedTaskCount_ = input.readInt32(); - break; - } default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { done = true; } break; } + case 8: { + bitField0_ |= 0x00000001; + guaranteedTaskCount_ = input.readInt32(); + break; + } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -106,69 +94,88 @@ private UpdateQueryRequestProto( return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto.class, org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public UpdateQueryRequestProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new 
UpdateQueryRequestProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional int32 guaranteed_task_count = 1; public static final int GUARANTEED_TASK_COUNT_FIELD_NUMBER = 1; private int guaranteedTaskCount_; /** * optional int32 guaranteed_task_count = 1; - * @return Whether the guaranteedTaskCount field is set. */ - @java.lang.Override public boolean hasGuaranteedTaskCount() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional int32 guaranteed_task_count = 1; - * @return The guaranteedTaskCount. */ - @java.lang.Override public int getGuaranteedTaskCount() { return guaranteedTaskCount_; } + private void initFields() { + guaranteedTaskCount_ = 0; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeInt32(1, guaranteedTaskCount_); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(1, guaranteedTaskCount_); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -179,42 +186,34 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto other = (org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto) obj; - if (hasGuaranteedTaskCount() != other.hasGuaranteedTaskCount()) return false; + boolean result = true; + result = result && (hasGuaranteedTaskCount() == other.hasGuaranteedTaskCount()); if (hasGuaranteedTaskCount()) { - if (getGuaranteedTaskCount() - != other.getGuaranteedTaskCount()) return false; + result = result && (getGuaranteedTaskCount() + == other.getGuaranteedTaskCount()); } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasGuaranteedTaskCount()) { hash = (37 * hash) + GUARANTEED_TASK_COUNT_FIELD_NUMBER; hash = (53 * hash) + 
getGuaranteedTaskCount(); } - hash = (29 * hash) + unknownFields.hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -238,59 +237,46 @@ public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.Up } public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto prototype) 
{ - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -298,16 +284,14 @@ protected Builder newBuilderForType( * Protobuf type {@code UpdateQueryRequestProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:UpdateQueryRequestProto) - org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryRequestProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryRequestProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -320,16 +304,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); guaranteedTaskCount_ = 0; @@ -337,18 +323,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryRequestProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto build() { org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto result = buildPartial(); if (!result.isInitialized()) { @@ -357,53 +344,19 @@ public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQue return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto buildPartial() { org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto result = new 
org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { - result.guaranteedTaskCount_ = guaranteedTaskCount_; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } + result.guaranteedTaskCount_ = guaranteedTaskCount_; result.bitField0_ = to_bitField0_; onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto) { return mergeFrom((org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto)other); @@ -418,17 +371,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtoc if (other.hasGuaranteedTaskCount()) { setGuaranteedTaskCount(other.getGuaranteedTaskCount()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -438,7 +388,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -448,27 +398,22 @@ public Builder mergeFrom( } private int bitField0_; + // optional int32 guaranteed_task_count = 1; private int guaranteedTaskCount_ ; /** * optional int32 guaranteed_task_count = 1; - * @return Whether the guaranteedTaskCount field is set. */ - @java.lang.Override public boolean hasGuaranteedTaskCount() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional int32 guaranteed_task_count = 1; - * @return The guaranteedTaskCount. */ - @java.lang.Override public int getGuaranteedTaskCount() { return guaranteedTaskCount_; } /** * optional int32 guaranteed_task_count = 1; - * @param value The guaranteedTaskCount to set. - * @return This builder for chaining. 
*/ public Builder setGuaranteedTaskCount(int value) { bitField0_ |= 0x00000001; @@ -478,7 +423,6 @@ public Builder setGuaranteedTaskCount(int value) { } /** * optional int32 guaranteed_task_count = 1; - * @return This builder for chaining. */ public Builder clearGuaranteedTaskCount() { bitField0_ = (bitField0_ & ~0x00000001); @@ -486,98 +430,54 @@ public Builder clearGuaranteedTaskCount() { onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:UpdateQueryRequestProto) } - // @@protoc_insertion_point(class_scope:UpdateQueryRequestProto) - private static final org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto(); - } - - public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public UpdateQueryRequestProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new UpdateQueryRequestProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new UpdateQueryRequestProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:UpdateQueryRequestProto) } - public interface UpdateQueryResponseProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:UpdateQueryResponseProto) - com.google.protobuf.MessageOrBuilder { + public interface UpdateQueryResponseProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { } /** * Protobuf type {@code UpdateQueryResponseProto} */ public static final class UpdateQueryResponseProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:UpdateQueryResponseProto) - UpdateQueryResponseProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements UpdateQueryResponseProtoOrBuilder { // Use UpdateQueryResponseProto.newBuilder() to construct. 
- private UpdateQueryResponseProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private UpdateQueryResponseProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private UpdateQueryResponseProto() { + private UpdateQueryResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final UpdateQueryResponseProto defaultInstance; + public static UpdateQueryResponseProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new UpdateQueryResponseProto(); + public UpdateQueryResponseProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private UpdateQueryResponseProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); try { @@ -589,8 +489,8 @@ private UpdateQueryResponseProto( done = true; break; default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { done = true; } break; @@ -599,11 +499,9 @@ private UpdateQueryResponseProto( } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -614,42 +512,63 @@ private UpdateQueryResponseProto( return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.class, org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.Builder.class); } - private byte memoizedIsInitialized = -1; + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public UpdateQueryResponseProto parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new UpdateQueryResponseProto(input, extensionRegistry); + } + }; + @java.lang.Override + public com.google.protobuf.Parser 
getParserForType() { + return PARSER; + } + + private void initFields() { + } + private byte memoizedIsInitialized = -1; public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - unknownFields.writeTo(output); + getSerializedSize(); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; + @java.lang.Override + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); + } + @java.lang.Override public boolean equals(final java.lang.Object obj) { if (obj == this) { @@ -660,33 +579,25 @@ public boolean equals(final java.lang.Object obj) { } org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto other = (org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto) obj; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + boolean result = true; + result = result && + getUnknownFields().equals(other.getUnknownFields()); + return result; } + private int memoizedHashCode = 0; @java.lang.Override public int hashCode() { if (memoizedHashCode != 0) { return memoizedHashCode; } int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (29 * hash) + unknownFields.hashCode(); + hash = (19 * hash) + getDescriptorForType().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; } - public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -710,59 +621,46 @@ public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.Up } public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return 
com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? 
new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -770,16 +668,14 @@ protected Builder newBuilderForType( * Protobuf type {@code UpdateQueryResponseProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:UpdateQueryResponseProto) - org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryResponseProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryResponseProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -792,33 +688,36 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryResponseProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto getDefaultInstanceForType() { return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto build() { org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto result = buildPartial(); if (!result.isInitialized()) { @@ -827,46 +726,12 @@ public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQue return result; } - @java.lang.Override public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto buildPartial() { org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto result = new org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto(this); onBuilt(); return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - 
com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto) { return mergeFrom((org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto)other); @@ -878,17 +743,14 @@ public Builder mergeFrom(com.google.protobuf.Message other) { public Builder mergeFrom(org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto other) { if (other == org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.getDefaultInstance()) return this; - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -898,7 +760,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -906,57 +768,16 @@ public Builder mergeFrom( } return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:UpdateQueryResponseProto) } - // @@protoc_insertion_point(class_scope:UpdateQueryResponseProto) - private static final org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto(); - } - - public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public UpdateQueryResponseProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new UpdateQueryResponseProto(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new UpdateQueryResponseProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:UpdateQueryResponseProto) } /** @@ -1191,22 +1012,22 @@ public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQue // @@protoc_insertion_point(class_scope:LlapPluginProtocol) } - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_UpdateQueryRequestProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_UpdateQueryRequestProto_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_UpdateQueryResponseProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_UpdateQueryResponseProto_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; } - private static com.google.protobuf.Descriptors.FileDescriptor + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; static { java.lang.String[] descriptorData = { @@ -1218,22 +1039,30 @@ public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQue "toBH\n&org.apache.hadoop.hive.llap.plugin" + ".rpcB\030LlapPluginProtocolProtos\210\001\001\240\001\001" }; - descriptor = com.google.protobuf.Descriptors.FileDescriptor + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_UpdateQueryRequestProto_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_UpdateQueryRequestProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_UpdateQueryRequestProto_descriptor, + new java.lang.String[] { "GuaranteedTaskCount", }); + internal_static_UpdateQueryResponseProto_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_UpdateQueryResponseProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_UpdateQueryResponseProto_descriptor, + new java.lang.String[] { }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { - }); - internal_static_UpdateQueryRequestProto_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_UpdateQueryRequestProto_fieldAccessorTable = new - 
com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_UpdateQueryRequestProto_descriptor, - new java.lang.String[] { "GuaranteedTaskCount", }); - internal_static_UpdateQueryResponseProto_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_UpdateQueryResponseProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_UpdateQueryResponseProto_descriptor, - new java.lang.String[] { }); + }, assigner); } // @@protoc_insertion_point(outer_class_scope) diff --git a/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapSignerImpl.java b/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapSignerImpl.java index a7fc398892ff..047e17686b7a 100644 --- a/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapSignerImpl.java +++ b/llap-common/src/java/org/apache/hadoop/hive/llap/security/LlapSignerImpl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.llap.security; import java.io.IOException; -import java.util.Arrays; +import java.security.MessageDigest; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; @@ -58,7 +58,9 @@ public SignedMessage serializeAndSign(Signable message) throws IOException { public void checkSignature(byte[] message, byte[] signature, int keyId) throws SecurityException { byte[] expectedSignature = secretManager.signWithKey(message, keyId); - if (Arrays.equals(signature, expectedSignature)) return; + if (MessageDigest.isEqual(signature, expectedSignature)) { + return; + } throw new SecurityException("Message signature does not match"); } diff --git a/llap-ext-client/pom.xml b/llap-ext-client/pom.xml index 439064091ae8..ffc2c695fc6d 100644 --- a/llap-ext-client/pom.xml +++ b/llap-ext-client/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-llap-ext-client diff --git a/llap-server/pom.xml b/llap-server/pom.xml index 1e04914b1a88..d90296d6023c 100644 --- a/llap-server/pom.xml +++ b/llap-server/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-llap-server diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/service/AsyncTaskCopyAuxJars.java b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/service/AsyncTaskCopyAuxJars.java index d185d1f58f8e..675f45284af1 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/cli/service/AsyncTaskCopyAuxJars.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/cli/service/AsyncTaskCopyAuxJars.java @@ -45,8 +45,8 @@ class AsyncTaskCopyAuxJars implements Callable { private static final String[] DEFAULT_AUX_CLASSES = new String[] {"org.apache.hive.hcatalog.data.JsonSerDe", "org.apache.hadoop.hive.druid.DruidStorageHandler", - "org.apache.hive.storage.jdbc.JdbcStorageHandler", "org.apache.commons.dbcp.BasicDataSourceFactory", - "org.apache.commons.pool.impl.GenericObjectPool", "org.apache.hadoop.hive.kafka.KafkaStorageHandler", + "org.apache.hive.storage.jdbc.JdbcStorageHandler", "org.apache.commons.dbcp2.BasicDataSourceFactory", + "org.apache.commons.pool2.impl.GenericObjectPool", "org.apache.hadoop.hive.kafka.KafkaStorageHandler", "org.apache.hadoop.hive.kudu.KuduStorageHandler"}; private static final String HBASE_SERDE_CLASS = "org.apache.hadoop.hive.hbase.HBaseSerDe"; diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java 
b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java index 315752dc2197..ced6c0d20d14 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/io/api/impl/LlapRecordReader.java @@ -153,7 +153,7 @@ private LlapRecordReader(MapWork mapWork, JobConf job, FileSplit split, this.sarg = ConvertAstToSearchArg.createFromConf(job); final String fragmentId = LlapTezUtils.getFragmentId(job); final String dagId = LlapTezUtils.getDagId(job); - final String queryId = HiveConf.getVar(job, HiveConf.ConfVars.HIVEQUERYID); + final String queryId = HiveConf.getVar(job, HiveConf.ConfVars.HIVE_QUERY_ID); MDC.put("dagId", dagId); MDC.put("queryId", queryId); TezCounters taskCounters = null; @@ -853,7 +853,7 @@ public String[] getOriginalColumnNames(TypeDescription fileSchema) { @Override public String getQueryId() { - return HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVEQUERYID); + return HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVE_QUERY_ID); } @Override diff --git a/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java b/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java index 70f568ee9041..9893210bd93c 100644 --- a/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java +++ b/llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java @@ -299,7 +299,7 @@ public void operationComplete(ChannelFuture future) throws Exception { final String BOSS_THREAD_NAME_PREFIX = "ShuffleHandler Netty Boss #"; AtomicInteger bossThreadCounter = new AtomicInteger(0); - bossGroup = new NioEventLoopGroup(maxShuffleThreads, new ThreadFactory() { + bossGroup = new NioEventLoopGroup(1, new ThreadFactory() { @Override public Thread newThread(Runnable r) { return new Thread(r, BOSS_THREAD_NAME_PREFIX + bossThreadCounter.incrementAndGet()); diff --git a/llap-tez/pom.xml b/llap-tez/pom.xml index 0c2622dfcbd1..64d33a5a695f 100644 --- a/llap-tez/pom.xml +++ b/llap-tez/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-llap-tez @@ -48,6 +48,16 @@ org.apache.zookeeper zookeeper + + + ch.qos.logback + logback-classic + + + ch.qos.logback + logback-core + + org.apache.curator diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java index 1b23b6a54807..d850473ffb56 100644 --- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java +++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskCommunicator.java @@ -886,14 +886,14 @@ private void resetCurrentDag(int newDagId, String hiveQueryId) { private String extractQueryId(TaskSpec taskSpec) throws IOException { UserPayload processorPayload = taskSpec.getProcessorDescriptor().getUserPayload(); Configuration conf = TezUtils.createConfFromUserPayload(processorPayload); - return HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID); + return HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID); } private String extractQueryIdFromContext() { //TODO: Remove following instance of check, When TEZ-2672 exposes getConf from DagInfo DagInfo dagInfo = getContext().getCurrentDagInfo(); if (dagInfo instanceof DAG) { - return ((DAG)dagInfo).getConf().get(ConfVars.HIVEQUERYID.varname); + return ((DAG)dagInfo).getConf().get(ConfVars.HIVE_QUERY_ID.varname); 
} return null; } diff --git a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java index 85c34ddfc067..8ead0f69f683 100644 --- a/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java +++ b/llap-tez/src/java/org/apache/hadoop/hive/llap/tezplugins/LlapTaskSchedulerService.java @@ -848,7 +848,7 @@ public void run() { if (amRegistry != null) { amRegistry.start(); int pluginPort = pluginEndpoint != null ? pluginEndpoint.getActualPort() : -1; - amRegistry.register(amPort, pluginPort, HiveConf.getVar(conf, ConfVars.HIVESESSIONID), + amRegistry.register(amPort, pluginPort, HiveConf.getVar(conf, ConfVars.HIVE_SESSION_ID), serializedToken, jobIdForToken, 0); } diff --git a/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java b/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java index 5a23d6d6cc1c..5a3a33b5452b 100644 --- a/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java +++ b/llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java @@ -353,7 +353,7 @@ TaskSpec registerRunningTaskAttempt(ContainerId containerId, int taskIdx) { private TaskSpec createBaseTaskSpec(String vertexName, TezVertexID vertexId, int taskIdx) { TaskSpec taskSpec = mock(TaskSpec.class); Configuration conf = new Configuration(false); - HiveConf.setVar(conf, HiveConf.ConfVars.HIVEQUERYID, "fakeQueryId"); + HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID, "fakeQueryId"); UserPayload userPayload; try { userPayload = TezUtils.createUserPayloadFromConf(conf); diff --git a/metastore/pom.xml b/metastore/pom.xml index 231a0d2e3daf..cbcdabf05a17 100644 --- a/metastore/pom.xml +++ b/metastore/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-metastore diff --git a/metastore/scripts/upgrade/hive/hive-schema-4.0.0-beta-2.hive.sql b/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql similarity index 99% rename from metastore/scripts/upgrade/hive/hive-schema-4.0.0-beta-2.hive.sql rename to metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql index 51eb17df42d7..689bb7633892 100644 --- a/metastore/scripts/upgrade/hive/hive-schema-4.0.0-beta-2.hive.sql +++ b/metastore/scripts/upgrade/hive/hive-schema-4.0.0.hive.sql @@ -823,8 +823,8 @@ FROM \"PART_COL_STATS\"" ); -CREATE OR REPLACE VIEW `VERSION` AS SELECT 1 AS `VER_ID`, '4.0.0-beta-2' AS `SCHEMA_VERSION`, - 'Hive release version 4.0.0-beta-2' AS `VERSION_COMMENT`; +CREATE OR REPLACE VIEW `VERSION` AS SELECT 1 AS `VER_ID`, '4.0.0' AS `SCHEMA_VERSION`, + 'Hive release version 4.0.0' AS `VERSION_COMMENT`; CREATE EXTERNAL TABLE IF NOT EXISTS `DB_VERSION` ( `VER_ID` BIGINT, diff --git a/metastore/scripts/upgrade/hive/hive-schema-4.1.0.hive.sql b/metastore/scripts/upgrade/hive/hive-schema-4.1.0.hive.sql new file mode 100644 index 000000000000..7a1cef3f97ab --- /dev/null +++ b/metastore/scripts/upgrade/hive/hive-schema-4.1.0.hive.sql @@ -0,0 +1,2121 @@ +-- HIVE system db + +CREATE DATABASE IF NOT EXISTS SYS; + +USE SYS; + +CREATE EXTERNAL TABLE IF NOT EXISTS `BUCKETING_COLS` ( + `SD_ID` bigint, + `BUCKET_COL_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_BUCKETING_COLS` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", 
+"hive.sql.query" = +"SELECT + \"SD_ID\", + \"BUCKET_COL_NAME\", + \"INTEGER_IDX\" +FROM + \"BUCKETING_COLS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `CDS` ( + `CD_ID` bigint, + CONSTRAINT `SYS_PK_CDS` PRIMARY KEY (`CD_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CD_ID\" +FROM + \"CDS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `COLUMNS_V2` ( + `CD_ID` bigint, + `COMMENT` string, + `COLUMN_NAME` string, + `TYPE_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_COLUMN_V2` PRIMARY KEY (`CD_ID`,`COLUMN_NAME`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CD_ID\", + \"COMMENT\", + \"COLUMN_NAME\", + \"TYPE_NAME\", + \"INTEGER_IDX\" +FROM + \"COLUMNS_V2\"" +); + + +CREATE EXTERNAL TABLE IF NOT EXISTS `DATABASE_PARAMS` ( + `DB_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_DATABASE_PARAMS` PRIMARY KEY (`DB_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"DATABASE_PARAMS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `DBS` ( + `DB_ID` bigint, + `DB_LOCATION_URI` string, + `NAME` string, + `OWNER_NAME` string, + `OWNER_TYPE` string, + CONSTRAINT `SYS_PK_DBS` PRIMARY KEY (`DB_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_ID\", + \"DB_LOCATION_URI\", + \"NAME\", + \"OWNER_NAME\", + \"OWNER_TYPE\" +FROM + \"DBS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `DB_PRIVS` ( + `DB_GRANT_ID` bigint, + `CREATE_TIME` int, + `DB_ID` bigint, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `DB_PRIV` string, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_DB_PRIVS` PRIMARY KEY (`DB_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"DB_GRANT_ID\", + \"CREATE_TIME\", + \"DB_ID\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"DB_PRIV\", + \"AUTHORIZER\" +FROM + \"DB_PRIVS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `GLOBAL_PRIVS` ( + `USER_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` string, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `USER_PRIV` string, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_GLOBAL_PRIVS` PRIMARY KEY (`USER_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"USER_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"USER_PRIV\", + \"AUTHORIZER\" +FROM + \"GLOBAL_PRIVS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITIONS` ( + `PART_ID` bigint, + `CREATE_TIME` int, + `LAST_ACCESS_TIME` int, + `PART_NAME` string, + `SD_ID` bigint, + `TBL_ID` bigint, + CONSTRAINT `SYS_PK_PARTITIONS` PRIMARY KEY (`PART_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( 
+"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"CREATE_TIME\", + \"LAST_ACCESS_TIME\", + \"PART_NAME\", + \"SD_ID\", + \"TBL_ID\" +FROM + \"PARTITIONS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITION_KEYS` ( + `TBL_ID` bigint, + `PKEY_COMMENT` string, + `PKEY_NAME` string, + `PKEY_TYPE` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_PARTITION_KEYS` PRIMARY KEY (`TBL_ID`,`PKEY_NAME`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"PKEY_COMMENT\", + \"PKEY_NAME\", + \"PKEY_TYPE\", + \"INTEGER_IDX\" +FROM + \"PARTITION_KEYS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITION_KEY_VALS` ( + `PART_ID` bigint, + `PART_KEY_VAL` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_PARTITION_KEY_VALS` PRIMARY KEY (`PART_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"PART_KEY_VAL\", + \"INTEGER_IDX\" +FROM + \"PARTITION_KEY_VALS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `PARTITION_PARAMS` ( + `PART_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_PARTITION_PARAMS` PRIMARY KEY (`PART_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"PARTITION_PARAMS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_PRIVS` ( + `PART_COLUMN_GRANT_ID` bigint, + `COLUMN_NAME` string, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PART_ID` bigint, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `PART_COL_PRIV` string, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_PART_COL_PRIVS` PRIMARY KEY (`PART_COLUMN_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_COLUMN_GRANT_ID\", + \"COLUMN_NAME\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PART_ID\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"PART_COL_PRIV\", + \"AUTHORIZER\" +FROM + \"PART_COL_PRIVS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `PART_PRIVS` ( + `PART_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PART_ID` bigint, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `PART_PRIV` string, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_PART_PRIVS` PRIMARY KEY (`PART_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"PART_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PART_ID\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"PART_PRIV\", + \"AUTHORIZER\" +FROM + \"PART_PRIVS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `ROLES` ( + `ROLE_ID` bigint, + `CREATE_TIME` int, + `OWNER_NAME` string, + `ROLE_NAME` string, + CONSTRAINT `SYS_PK_ROLES` PRIMARY KEY (`ROLE_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"ROLE_ID\", + \"CREATE_TIME\", + \"OWNER_NAME\", + 
\"ROLE_NAME\" +FROM + \"ROLES\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `ROLE_MAP` ( + `ROLE_GRANT_ID` bigint, + `ADD_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `ROLE_ID` bigint, + CONSTRAINT `SYS_PK_ROLE_MAP` PRIMARY KEY (`ROLE_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"ROLE_GRANT_ID\", + \"ADD_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"ROLE_ID\" +FROM + \"ROLE_MAP\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `SDS` ( + `SD_ID` bigint, + `CD_ID` bigint, + `INPUT_FORMAT` string, + `IS_COMPRESSED` boolean, + `IS_STOREDASSUBDIRECTORIES` boolean, + `LOCATION` string, + `NUM_BUCKETS` int, + `OUTPUT_FORMAT` string, + `SERDE_ID` bigint, + CONSTRAINT `SYS_PK_SDS` PRIMARY KEY (`SD_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"CD_ID\", + \"INPUT_FORMAT\", + \"IS_COMPRESSED\", + \"IS_STOREDASSUBDIRECTORIES\", + \"LOCATION\", + \"NUM_BUCKETS\", + \"OUTPUT_FORMAT\", + \"SERDE_ID\" +FROM + \"SDS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `SD_PARAMS` ( + `SD_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_SD_PARAMS` PRIMARY KEY (`SD_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"SD_PARAMS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `SEQUENCE_TABLE` ( + `SEQUENCE_NAME` string, + `NEXT_VAL` bigint, + CONSTRAINT `SYS_PK_SEQUENCE_TABLE` PRIMARY KEY (`SEQUENCE_NAME`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SEQUENCE_NAME\", + \"NEXT_VAL\" +FROM + \"SEQUENCE_TABLE\"" +); + + +CREATE EXTERNAL TABLE IF NOT EXISTS `SERDES` ( + `SERDE_ID` bigint, + `NAME` string, + `SLIB` string, + CONSTRAINT `SYS_PK_SERDES` PRIMARY KEY (`SERDE_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SERDE_ID\", + \"NAME\", + \"SLIB\" +FROM + \"SERDES\"" +); + + +CREATE EXTERNAL TABLE IF NOT EXISTS `SERDE_PARAMS` ( + `SERDE_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_SERDE_PARAMS` PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SERDE_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"SERDE_PARAMS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_COL_NAMES` ( + `SD_ID` bigint, + `SKEWED_COL_NAME` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_COL_NAMES` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"SKEWED_COL_NAME\", + \"INTEGER_IDX\" +FROM + \"SKEWED_COL_NAMES\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` ( + `SD_ID` bigint, + `STRING_LIST_ID_KID` bigint, + `LOCATION` string, 
+ CONSTRAINT `SYS_PK_COL_VALUE_LOC_MAP` PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"STRING_LIST_ID_KID\", + \"LOCATION\" +FROM + \"SKEWED_COL_VALUE_LOC_MAP\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_STRING_LIST` ( + `STRING_LIST_ID` bigint, + CONSTRAINT `SYS_PK_SKEWED_STRING_LIST` PRIMARY KEY (`STRING_LIST_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"STRING_LIST_ID\" +FROM + \"SKEWED_STRING_LIST\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` ( + `STRING_LIST_ID` bigint, + `STRING_LIST_VALUE` string, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_STRING_LIST_VALUES` PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"STRING_LIST_ID\", + \"STRING_LIST_VALUE\", + \"INTEGER_IDX\" +FROM + \"SKEWED_STRING_LIST_VALUES\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `SKEWED_VALUES` ( + `SD_ID_OID` bigint, + `STRING_LIST_ID_EID` bigint, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SKEWED_VALUES` PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID_OID\", + \"STRING_LIST_ID_EID\", + \"INTEGER_IDX\" +FROM + \"SKEWED_VALUES\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `SORT_COLS` ( + `SD_ID` bigint, + `COLUMN_NAME` string, + `ORDER` int, + `INTEGER_IDX` int, + CONSTRAINT `SYS_PK_SORT_COLS` PRIMARY KEY (`SD_ID`,`INTEGER_IDX`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SD_ID\", + \"COLUMN_NAME\", + \"ORDER\", + \"INTEGER_IDX\" +FROM + \"SORT_COLS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `TABLE_PARAMS` ( + `TBL_ID` bigint, + `PARAM_KEY` string, + `PARAM_VALUE` string, + CONSTRAINT `SYS_PK_TABLE_PARAMS` PRIMARY KEY (`TBL_ID`,`PARAM_KEY`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"PARAM_KEY\", + \"PARAM_VALUE\" +FROM + \"TABLE_PARAMS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `TBLS` ( + `TBL_ID` bigint, + `CREATE_TIME` int, + `DB_ID` bigint, + `LAST_ACCESS_TIME` int, + `OWNER` string, + `RETENTION` int, + `SD_ID` bigint, + `TBL_NAME` string, + `TBL_TYPE` string, + `VIEW_EXPANDED_TEXT` string, + `VIEW_ORIGINAL_TEXT` string, + `IS_REWRITE_ENABLED` boolean, + CONSTRAINT `SYS_PK_TBLS` PRIMARY KEY (`TBL_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_ID\", + \"CREATE_TIME\", + \"DB_ID\", + \"LAST_ACCESS_TIME\", + \"OWNER\", + \"RETENTION\", + \"SD_ID\", + \"TBL_NAME\", + \"TBL_TYPE\", + \"VIEW_EXPANDED_TEXT\", + \"VIEW_ORIGINAL_TEXT\", + \"IS_REWRITE_ENABLED\" +FROM \"TBLS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `MV_CREATION_METADATA` ( + `MV_CREATION_METADATA_ID` bigint, + `DB_NAME` string, + `TBL_NAME` string, + `TXN_LIST` string, + CONSTRAINT `SYS_PK_MV_CREATION_METADATA` PRIMARY KEY 
(`MV_CREATION_METADATA_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"MV_CREATION_METADATA_ID\", + \"DB_NAME\", + \"TBL_NAME\", + \"TXN_LIST\" +FROM \"MV_CREATION_METADATA\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `MV_TABLES_USED` ( + `MV_CREATION_METADATA_ID` bigint, + `TBL_ID` bigint +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"MV_CREATION_METADATA_ID\", + \"TBL_ID\" +FROM \"MV_TABLES_USED\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `TBL_COL_PRIVS` ( + `TBL_COLUMN_GRANT_ID` bigint, + `COLUMN_NAME` string, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `TBL_COL_PRIV` string, + `TBL_ID` bigint, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_TBL_COL_PRIVS` PRIMARY KEY (`TBL_COLUMN_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_COLUMN_GRANT_ID\", + \"COLUMN_NAME\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"TBL_COL_PRIV\", + \"TBL_ID\", + \"AUTHORIZER\" +FROM + \"TBL_COL_PRIVS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `TBL_PRIVS` ( + `TBL_GRANT_ID` bigint, + `CREATE_TIME` int, + `GRANT_OPTION` int, + `GRANTOR` string, + `GRANTOR_TYPE` string, + `PRINCIPAL_NAME` string, + `PRINCIPAL_TYPE` string, + `TBL_PRIV` string, + `TBL_ID` bigint, + `AUTHORIZER` string, + CONSTRAINT `SYS_PK_TBL_PRIVS` PRIMARY KEY (`TBL_GRANT_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TBL_GRANT_ID\", + \"CREATE_TIME\", + \"GRANT_OPTION\", + \"GRANTOR\", + \"GRANTOR_TYPE\", + \"PRINCIPAL_NAME\", + \"PRINCIPAL_TYPE\", + \"TBL_PRIV\", + \"TBL_ID\", + \"AUTHORIZER\" +FROM + \"TBL_PRIVS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `TAB_COL_STATS` ( + `CS_ID` bigint, + `DB_NAME` string, + `TABLE_NAME` string, + `COLUMN_NAME` string, + `COLUMN_TYPE` string, + `TBL_ID` bigint, + `LONG_LOW_VALUE` bigint, + `LONG_HIGH_VALUE` bigint, + `DOUBLE_HIGH_VALUE` double, + `DOUBLE_LOW_VALUE` double, + `BIG_DECIMAL_LOW_VALUE` string, + `BIG_DECIMAL_HIGH_VALUE` string, + `NUM_NULLS` bigint, + `NUM_DISTINCTS` bigint, + `AVG_COL_LEN` double, + `MAX_COL_LEN` bigint, + `NUM_TRUES` bigint, + `NUM_FALSES` bigint, + `LAST_ANALYZED` bigint, + `ENGINE` string, + CONSTRAINT `SYS_PK_TAB_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CS_ID\", + \"DB_NAME\", + \"TABLE_NAME\", + \"COLUMN_NAME\", + \"COLUMN_TYPE\", + \"TBL_ID\", + \"LONG_LOW_VALUE\", + \"LONG_HIGH_VALUE\", + \"DOUBLE_HIGH_VALUE\", + \"DOUBLE_LOW_VALUE\", + \"BIG_DECIMAL_LOW_VALUE\", + \"BIG_DECIMAL_HIGH_VALUE\", + \"NUM_NULLS\", + \"NUM_DISTINCTS\", + \"AVG_COL_LEN\", + \"MAX_COL_LEN\", + \"NUM_TRUES\", + \"NUM_FALSES\", + \"LAST_ANALYZED\", + \"ENGINE\" +FROM + \"TAB_COL_STATS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `PART_COL_STATS` ( + `CS_ID` bigint, + `DB_NAME` string, + `TABLE_NAME` string, + `PARTITION_NAME` string, + `COLUMN_NAME` string, + `COLUMN_TYPE` string, + `PART_ID` 
bigint, + `LONG_LOW_VALUE` bigint, + `LONG_HIGH_VALUE` bigint, + `DOUBLE_HIGH_VALUE` double, + `DOUBLE_LOW_VALUE` double, + `BIG_DECIMAL_LOW_VALUE` string, + `BIG_DECIMAL_HIGH_VALUE` string, + `NUM_NULLS` bigint, + `NUM_DISTINCTS` bigint, + `AVG_COL_LEN` double, + `MAX_COL_LEN` bigint, + `NUM_TRUES` bigint, + `NUM_FALSES` bigint, + `LAST_ANALYZED` bigint, + `ENGINE` string, + CONSTRAINT `SYS_PK_PART_COL_STATS` PRIMARY KEY (`CS_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CS_ID\", + \"DB_NAME\", + \"TABLE_NAME\", + \"PARTITION_NAME\", + \"COLUMN_NAME\", + \"COLUMN_TYPE\", + \"PART_ID\", + \"LONG_LOW_VALUE\", + \"LONG_HIGH_VALUE\", + \"DOUBLE_HIGH_VALUE\", + \"DOUBLE_LOW_VALUE\", + \"BIG_DECIMAL_LOW_VALUE\", + \"BIG_DECIMAL_HIGH_VALUE\", + \"NUM_NULLS\", + \"NUM_DISTINCTS\", + \"AVG_COL_LEN\", + \"MAX_COL_LEN\", + \"NUM_TRUES\", + \"NUM_FALSES\", + \"LAST_ANALYZED\", + \"ENGINE\" +FROM + \"PART_COL_STATS\"" +); + +CREATE OR REPLACE VIEW `VERSION` AS SELECT 1 AS `VER_ID`, '4.1.0' AS `SCHEMA_VERSION`, + 'Hive release version 4.1.0' AS `VERSION_COMMENT`; + +CREATE EXTERNAL TABLE IF NOT EXISTS `DB_VERSION` ( + `VER_ID` BIGINT, + `SCHEMA_VERSION` string, + `VERSION_COMMENT` string, + CONSTRAINT `SYS_PK_DB_VERSION` PRIMARY KEY (`VER_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"VER_ID\", + \"SCHEMA_VERSION\", + \"VERSION_COMMENT\" +FROM + \"VERSION\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `FUNCS` ( + `FUNC_ID` bigint, + `CLASS_NAME` string, + `CREATE_TIME` int, + `DB_ID` bigint, + `FUNC_NAME` string, + `FUNC_TYPE` int, + `OWNER_NAME` string, + `OWNER_TYPE` string, + CONSTRAINT `SYS_PK_FUNCS` PRIMARY KEY (`FUNC_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"FUNC_ID\", + \"CLASS_NAME\", + \"CREATE_TIME\", + \"DB_ID\", + \"FUNC_NAME\", + \"FUNC_TYPE\", + \"OWNER_NAME\", + \"OWNER_TYPE\" +FROM + \"FUNCS\"" +); + +-- CREATE EXTERNAL TABLE IF NOT EXISTS `FUNC_RU` ( +-- `FUNC_ID` bigint, +-- `RESOURCE_TYPE` int, +-- `RESOURCE_URI` string, +-- `INTEGER_IDX` int, +-- CONSTRAINT `SYS_PK_FUNCS_RU` PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`) DISABLE +-- ) +-- STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +-- TBLPROPERTIES ( +-- "hive.sql.database.type" = "METASTORE", +-- "hive.sql.query" = "SELECT * FROM FUNCS_RU" +-- ); + +CREATE EXTERNAL TABLE IF NOT EXISTS `KEY_CONSTRAINTS` +( + `CHILD_CD_ID` bigint, + `CHILD_INTEGER_IDX` int, + `CHILD_TBL_ID` bigint, + `PARENT_CD_ID` bigint, + `PARENT_INTEGER_IDX` int, + `PARENT_TBL_ID` bigint, + `POSITION` bigint, + `CONSTRAINT_NAME` string, + `CONSTRAINT_TYPE` string, + `UPDATE_RULE` string, + `DELETE_RULE` string, + `ENABLE_VALIDATE_RELY` int, + `DEFAULT_VALUE` string, + CONSTRAINT `SYS_PK_KEY_CONSTRAINTS` PRIMARY KEY (`PARENT_TBL_ID`, `CONSTRAINT_NAME`, `POSITION`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"CHILD_CD_ID\", + \"CHILD_INTEGER_IDX\", + \"CHILD_TBL_ID\", + \"PARENT_CD_ID\", + \"PARENT_INTEGER_IDX\", + \"PARENT_TBL_ID\", + \"POSITION\", + \"CONSTRAINT_NAME\", + \"CONSTRAINT_TYPE\", + \"UPDATE_RULE\", + \"DELETE_RULE\", + \"ENABLE_VALIDATE_RELY\", + \"DEFAULT_VALUE\" 
+FROM + \"KEY_CONSTRAINTS\"" +); + +CREATE OR REPLACE VIEW `TABLE_STATS_VIEW` AS +SELECT + `TBL_ID`, + max(CASE `PARAM_KEY` WHEN 'COLUMN_STATS_ACCURATE' THEN `PARAM_VALUE` END) AS COLUMN_STATS_ACCURATE, + max(CASE `PARAM_KEY` WHEN 'numFiles' THEN `PARAM_VALUE` END) AS NUM_FILES, + max(CASE `PARAM_KEY` WHEN 'numRows' THEN `PARAM_VALUE` END) AS NUM_ROWS, + max(CASE `PARAM_KEY` WHEN 'rawDataSize' THEN `PARAM_VALUE` END) AS RAW_DATA_SIZE, + max(CASE `PARAM_KEY` WHEN 'totalSize' THEN `PARAM_VALUE` END) AS TOTAL_SIZE, + max(CASE `PARAM_KEY` WHEN 'transient_lastDdlTime' THEN `PARAM_VALUE` END) AS TRANSIENT_LAST_DDL_TIME +FROM `TABLE_PARAMS` GROUP BY `TBL_ID`; + +CREATE OR REPLACE VIEW `PARTITION_STATS_VIEW` AS +SELECT + `PART_ID`, + max(CASE `PARAM_KEY` WHEN 'COLUMN_STATS_ACCURATE' THEN `PARAM_VALUE` END) AS COLUMN_STATS_ACCURATE, + max(CASE `PARAM_KEY` WHEN 'numFiles' THEN `PARAM_VALUE` END) AS NUM_FILES, + max(CASE `PARAM_KEY` WHEN 'numRows' THEN `PARAM_VALUE` END) AS NUM_ROWS, + max(CASE `PARAM_KEY` WHEN 'rawDataSize' THEN `PARAM_VALUE` END) AS RAW_DATA_SIZE, + max(CASE `PARAM_KEY` WHEN 'totalSize' THEN `PARAM_VALUE` END) AS TOTAL_SIZE, + max(CASE `PARAM_KEY` WHEN 'transient_lastDdlTime' THEN `PARAM_VALUE` END) AS TRANSIENT_LAST_DDL_TIME +FROM `PARTITION_PARAMS` GROUP BY `PART_ID`; + +CREATE EXTERNAL TABLE IF NOT EXISTS `WM_RESOURCEPLANS` ( + `NAME` string, + `NS` string, + `STATUS` string, + `QUERY_PARALLELISM` int, + `DEFAULT_POOL_PATH` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"WM_RESOURCEPLAN\".\"NAME\", + case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS, + \"STATUS\", + \"WM_RESOURCEPLAN\".\"QUERY_PARALLELISM\", + \"WM_POOL\".\"PATH\" +FROM + \"WM_RESOURCEPLAN\" LEFT OUTER JOIN \"WM_POOL\" ON \"WM_RESOURCEPLAN\".\"DEFAULT_POOL_ID\" = \"WM_POOL\".\"POOL_ID\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `WM_TRIGGERS` ( + `RP_NAME` string, + `NS` string, + `NAME` string, + `TRIGGER_EXPRESSION` string, + `ACTION_EXPRESSION` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + r.\"NAME\" AS RP_NAME, + case when r.\"NS\" is null then 'default' else r.\"NS\" end, + t.\"NAME\" AS NAME, + \"TRIGGER_EXPRESSION\", + \"ACTION_EXPRESSION\" +FROM + \"WM_TRIGGER\" t +JOIN + \"WM_RESOURCEPLAN\" r +ON + t.\"RP_ID\" = r.\"RP_ID\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `WM_POOLS` ( + `RP_NAME` string, + `NS` string, + `PATH` string, + `ALLOC_FRACTION` double, + `QUERY_PARALLELISM` int, + `SCHEDULING_POLICY` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"WM_RESOURCEPLAN\".\"NAME\", + case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS, + \"WM_POOL\".\"PATH\", + \"WM_POOL\".\"ALLOC_FRACTION\", + \"WM_POOL\".\"QUERY_PARALLELISM\", + \"WM_POOL\".\"SCHEDULING_POLICY\" +FROM + \"WM_POOL\" +JOIN + \"WM_RESOURCEPLAN\" +ON + \"WM_POOL\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `WM_POOLS_TO_TRIGGERS` ( + `RP_NAME` string, + `NS` string, + `POOL_PATH` string, + `TRIGGER_NAME` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + 
\"WM_RESOURCEPLAN\".\"NAME\" AS RP_NAME, + case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS, + \"WM_POOL\".\"PATH\" AS POOL_PATH, + \"WM_TRIGGER\".\"NAME\" AS TRIGGER_NAME +FROM \"WM_POOL_TO_TRIGGER\" + JOIN \"WM_POOL\" ON \"WM_POOL_TO_TRIGGER\".\"POOL_ID\" = \"WM_POOL\".\"POOL_ID\" + JOIN \"WM_TRIGGER\" ON \"WM_POOL_TO_TRIGGER\".\"TRIGGER_ID\" = \"WM_TRIGGER\".\"TRIGGER_ID\" + JOIN \"WM_RESOURCEPLAN\" ON \"WM_POOL\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\" +UNION +SELECT + \"WM_RESOURCEPLAN\".\"NAME\" AS RP_NAME, + case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS, + '' AS POOL_PATH, + \"WM_TRIGGER\".\"NAME\" AS TRIGGER_NAME +FROM \"WM_TRIGGER\" + JOIN \"WM_RESOURCEPLAN\" ON \"WM_TRIGGER\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\" +WHERE CAST(\"WM_TRIGGER\".\"IS_IN_UNMANAGED\" AS CHAR) IN ('1', 't') +" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `WM_MAPPINGS` ( + `RP_NAME` string, + `NS` string, + `ENTITY_TYPE` string, + `ENTITY_NAME` string, + `POOL_PATH` string, + `ORDERING` int +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"WM_RESOURCEPLAN\".\"NAME\", + case when \"WM_RESOURCEPLAN\".\"NS\" is null then 'default' else \"WM_RESOURCEPLAN\".\"NS\" end AS NS, + \"ENTITY_TYPE\", + \"ENTITY_NAME\", + case when \"WM_POOL\".\"PATH\" is null then '' else \"WM_POOL\".\"PATH\" end, + \"ORDERING\" +FROM \"WM_MAPPING\" +JOIN \"WM_RESOURCEPLAN\" ON \"WM_MAPPING\".\"RP_ID\" = \"WM_RESOURCEPLAN\".\"RP_ID\" +LEFT OUTER JOIN \"WM_POOL\" ON \"WM_POOL\".\"POOL_ID\" = \"WM_MAPPING\".\"POOL_ID\" +" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `COMPACTION_QUEUE` ( + `CQ_ID` bigint, + `CQ_DATABASE` string, + `CQ_TABLE` string, + `CQ_PARTITION` string, + `CQ_STATE` string, + `CQ_TYPE` string, + `CQ_TBLPROPERTIES` string, + `CQ_WORKER_ID` string, + `CQ_ENQUEUE_TIME` bigint, + `CQ_START` bigint, + `CQ_RUN_AS` string, + `CQ_HIGHEST_WRITE_ID` bigint, + `CQ_HADOOP_JOB_ID` string, + `CQ_ERROR_MESSAGE` string, + `CQ_NEXT_TXN_ID` bigint, + `CQ_TXN_ID` bigint, + `CQ_COMMIT_TIME` bigint, + `CQ_INITIATOR_ID` string, + `CQ_INITIATOR_VERSION` string, + `CQ_WORKER_VERSION` string, + `CQ_CLEANER_START` bigint, + `CQ_POOL_NAME` string, + `CQ_NUMBER_OF_BUCKETS` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"COMPACTION_QUEUE\".\"CQ_ID\", + \"COMPACTION_QUEUE\".\"CQ_DATABASE\", + \"COMPACTION_QUEUE\".\"CQ_TABLE\", + \"COMPACTION_QUEUE\".\"CQ_PARTITION\", + \"COMPACTION_QUEUE\".\"CQ_STATE\", + \"COMPACTION_QUEUE\".\"CQ_TYPE\", + \"COMPACTION_QUEUE\".\"CQ_TBLPROPERTIES\", + \"COMPACTION_QUEUE\".\"CQ_WORKER_ID\", + \"COMPACTION_QUEUE\".\"CQ_ENQUEUE_TIME\", + \"COMPACTION_QUEUE\".\"CQ_START\", + \"COMPACTION_QUEUE\".\"CQ_RUN_AS\", + \"COMPACTION_QUEUE\".\"CQ_HIGHEST_WRITE_ID\", + \"COMPACTION_QUEUE\".\"CQ_HADOOP_JOB_ID\", + \"COMPACTION_QUEUE\".\"CQ_ERROR_MESSAGE\", + \"COMPACTION_QUEUE\".\"CQ_NEXT_TXN_ID\", + \"COMPACTION_QUEUE\".\"CQ_TXN_ID\", + \"COMPACTION_QUEUE\".\"CQ_COMMIT_TIME\", + \"COMPACTION_QUEUE\".\"CQ_INITIATOR_ID\", + \"COMPACTION_QUEUE\".\"CQ_INITIATOR_VERSION\", + \"COMPACTION_QUEUE\".\"CQ_WORKER_VERSION\", + \"COMPACTION_QUEUE\".\"CQ_CLEANER_START\", + \"COMPACTION_QUEUE\".\"CQ_POOL_NAME\", + \"COMPACTION_QUEUE\".\"CQ_NUMBER_OF_BUCKETS\" +FROM \"COMPACTION_QUEUE\" +" +); + +CREATE EXTERNAL TABLE 
IF NOT EXISTS `COMPLETED_COMPACTIONS` ( + `CC_ID` bigint, + `CC_DATABASE` string, + `CC_TABLE` string, + `CC_PARTITION` string, + `CC_STATE` string, + `CC_TYPE` string, + `CC_TBLPROPERTIES` string, + `CC_WORKER_ID` string, + `CC_ENQUEUE_TIME` bigint, + `CC_START` bigint, + `CC_END` bigint, + `CC_RUN_AS` string, + `CC_HIGHEST_WRITE_ID` bigint, + `CC_HADOOP_JOB_ID` string, + `CC_ERROR_MESSAGE` string, + `CC_NEXT_TXN_ID` bigint, + `CC_TXN_ID` bigint, + `CC_COMMIT_TIME` bigint, + `CC_INITIATOR_ID` string, + `CC_INITIATOR_VERSION` string, + `CC_WORKER_VERSION` string, + `CC_POOL_NAME` string, + `CC_NUMBER_OF_BUCKETS` string +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"COMPLETED_COMPACTIONS\".\"CC_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_DATABASE\", + \"COMPLETED_COMPACTIONS\".\"CC_TABLE\", + \"COMPLETED_COMPACTIONS\".\"CC_PARTITION\", + \"COMPLETED_COMPACTIONS\".\"CC_STATE\", + \"COMPLETED_COMPACTIONS\".\"CC_TYPE\", + \"COMPLETED_COMPACTIONS\".\"CC_TBLPROPERTIES\", + \"COMPLETED_COMPACTIONS\".\"CC_WORKER_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_ENQUEUE_TIME\", + \"COMPLETED_COMPACTIONS\".\"CC_START\", + \"COMPLETED_COMPACTIONS\".\"CC_END\", + \"COMPLETED_COMPACTIONS\".\"CC_RUN_AS\", + \"COMPLETED_COMPACTIONS\".\"CC_HIGHEST_WRITE_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_HADOOP_JOB_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_ERROR_MESSAGE\", + \"COMPLETED_COMPACTIONS\".\"CC_NEXT_TXN_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_TXN_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_COMMIT_TIME\", + \"COMPLETED_COMPACTIONS\".\"CC_INITIATOR_ID\", + \"COMPLETED_COMPACTIONS\".\"CC_INITIATOR_VERSION\", + \"COMPLETED_COMPACTIONS\".\"CC_WORKER_VERSION\", + \"COMPLETED_COMPACTIONS\".\"CC_POOL_NAME\", + \"COMPLETED_COMPACTIONS\".\"CC_NUMBER_OF_BUCKETS\" +FROM \"COMPLETED_COMPACTIONS\" +" +); + +CREATE OR REPLACE VIEW `COMPACTIONS` +( + `C_ID`, + `C_CATALOG`, + `C_DATABASE`, + `C_TABLE`, + `C_PARTITION`, + `C_TYPE`, + `C_STATE`, + `C_WORKER_HOST`, + `C_WORKER_ID`, + `C_WORKER_VERSION`, + `C_ENQUEUE_TIME`, + `C_START`, + `C_DURATION`, + `C_HADOOP_JOB_ID`, + `C_RUN_AS`, + `C_ERROR_MESSAGE`, + `C_NEXT_TXN_ID`, + `C_TXN_ID`, + `C_COMMIT_TIME`, + `C_HIGHEST_WRITE_ID`, + `C_INITIATOR_HOST`, + `C_INITIATOR_ID`, + `C_INITIATOR_VERSION`, + `C_CLEANER_START`, + `C_POOL_NAME`, + `C_NUMBER_OF_BUCKETS`, + `C_TBLPROPERTIES` +) AS +SELECT + CC_ID, + 'default', + CC_DATABASE, + CC_TABLE, + CC_PARTITION, + CASE WHEN CC_TYPE = 'i' THEN 'minor' WHEN CC_TYPE = 'a' THEN 'major' ELSE 'UNKNOWN' END, + CASE WHEN CC_STATE = 'f' THEN 'failed' WHEN CC_STATE = 's' THEN 'succeeded' + WHEN CC_STATE = 'a' THEN 'did not initiate' WHEN CC_STATE = 'c' THEN 'refused' ELSE 'UNKNOWN' END, + CASE WHEN CC_WORKER_ID IS NULL THEN cast (null as string) ELSE split(CC_WORKER_ID,"-")[0] END, + CASE WHEN CC_WORKER_ID IS NULL THEN cast (null as string) ELSE split(CC_WORKER_ID,"-")[size(split(CC_WORKER_ID,"-"))-1] END, + CC_WORKER_VERSION, + FROM_UNIXTIME(CC_ENQUEUE_TIME DIV 1000), + FROM_UNIXTIME(CC_START DIV 1000), + CASE WHEN CC_END IS NULL THEN cast (null as string) ELSE CC_END-CC_START END, + CC_HADOOP_JOB_ID, + CC_RUN_AS, + CC_ERROR_MESSAGE, + CC_NEXT_TXN_ID, + CC_TXN_ID, + FROM_UNIXTIME(CC_COMMIT_TIME DIV 1000), + CC_HIGHEST_WRITE_ID, + CASE WHEN CC_INITIATOR_ID IS NULL THEN cast (null as string) ELSE split(CC_INITIATOR_ID,"-")[0] END, + CASE WHEN CC_INITIATOR_ID IS NULL THEN cast (null as string) ELSE split(CC_INITIATOR_ID,"-")[size(split(CC_INITIATOR_ID,"-"))-1] 
END, + CC_INITIATOR_VERSION, + NULL, + NVL(CC_POOL_NAME, 'default'), + CC_NUMBER_OF_BUCKETS, + CC_TBLPROPERTIES +FROM COMPLETED_COMPACTIONS +UNION ALL +SELECT + CQ_ID, + 'default', + CQ_DATABASE, + CQ_TABLE, + CQ_PARTITION, + CASE WHEN CQ_TYPE = 'i' THEN 'minor' WHEN CQ_TYPE = 'a' THEN 'major' ELSE 'UNKNOWN' END, + CASE WHEN CQ_STATE = 'i' THEN 'initiated' WHEN CQ_STATE = 'w' THEN 'working' WHEN CQ_STATE = 'r' THEN 'ready for cleaning' ELSE 'UNKNOWN' END, + CASE WHEN CQ_WORKER_ID IS NULL THEN NULL ELSE split(CQ_WORKER_ID,"-")[0] END, + CASE WHEN CQ_WORKER_ID IS NULL THEN NULL ELSE split(CQ_WORKER_ID,"-")[size(split(CQ_WORKER_ID,"-"))-1] END, + CQ_WORKER_VERSION, + FROM_UNIXTIME(CQ_ENQUEUE_TIME DIV 1000), + FROM_UNIXTIME(CQ_START DIV 1000), + cast (null as string), + CQ_HADOOP_JOB_ID, + CQ_RUN_AS, + CQ_ERROR_MESSAGE, + CQ_NEXT_TXN_ID, + CQ_TXN_ID, + FROM_UNIXTIME(CQ_COMMIT_TIME DIV 1000), + CQ_HIGHEST_WRITE_ID, + CASE WHEN CQ_INITIATOR_ID IS NULL THEN NULL ELSE split(CQ_INITIATOR_ID,"-")[0] END, + CASE WHEN CQ_INITIATOR_ID IS NULL THEN NULL ELSE split(CQ_INITIATOR_ID,"-")[size(split(CQ_INITIATOR_ID,"-"))-1] END, + CQ_INITIATOR_VERSION, + FROM_UNIXTIME(CQ_CLEANER_START DIV 1000), + NVL(CQ_POOL_NAME, 'default'), + CQ_NUMBER_OF_BUCKETS, + CQ_TBLPROPERTIES +FROM COMPACTION_QUEUE; + +CREATE EXTERNAL TABLE IF NOT EXISTS `SCHEDULED_QUERIES` ( + `SCHEDULED_QUERY_ID` bigint, + `SCHEDULE_NAME` string, + `ENABLED` boolean, + `CLUSTER_NAMESPACE` string, + `SCHEDULE` string, + `USER` string, + `QUERY` string, + `NEXT_EXECUTION` bigint, + `ACTIVE_EXECUTION_ID` bigint, + CONSTRAINT `SYS_PK_SCHEDULED_QUERIES` PRIMARY KEY (`SCHEDULED_QUERY_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SCHEDULED_QUERY_ID\", + \"SCHEDULE_NAME\", + \"ENABLED\", + \"CLUSTER_NAMESPACE\", + \"SCHEDULE\", + \"USER\", + \"QUERY\", + \"NEXT_EXECUTION\", + \"ACTIVE_EXECUTION_ID\" +FROM + \"SCHEDULED_QUERIES\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `SCHEDULED_EXECUTIONS` ( + `SCHEDULED_EXECUTION_ID` bigint, + `SCHEDULED_QUERY_ID` bigint, + `EXECUTOR_QUERY_ID` string, + `STATE` string, + `START_TIME` int, + `END_TIME` int, + `ERROR_MESSAGE` string, + `LAST_UPDATE_TIME` int, + CONSTRAINT `SYS_PK_SCHEDULED_EXECUTIONS` PRIMARY KEY (`SCHEDULED_EXECUTION_ID`) DISABLE +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"SCHEDULED_EXECUTION_ID\", + \"SCHEDULED_QUERY_ID\", + \"EXECUTOR_QUERY_ID\", + \"STATE\", + \"START_TIME\", + \"END_TIME\", + \"ERROR_MESSAGE\", + \"LAST_UPDATE_TIME\" +FROM + \"SCHEDULED_EXECUTIONS\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `TXNS` ( + `TXN_ID` bigint, + `TXN_STATE` string, + `TXN_STARTED` bigint, + `TXN_LAST_HEARTBEAT` bigint, + `TXN_USER` string, + `TXN_HOST` string, + `TXN_AGENT_INFO` string, + `TXN_META_INFO` string, + `TXN_HEARTBEAT_COUNT` int, + `TXN_TYPE` int +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TXN_ID\", + \"TXN_STATE\", + \"TXN_STARTED\", + \"TXN_LAST_HEARTBEAT\", + \"TXN_USER\", + \"TXN_HOST\", + \"TXN_AGENT_INFO\", + \"TXN_META_INFO\", + \"TXN_HEARTBEAT_COUNT\", + \"TXN_TYPE\" +FROM \"TXNS\"" +); + + +CREATE EXTERNAL TABLE IF NOT EXISTS `TXN_COMPONENTS` ( + `TC_TXNID` bigint, + `TC_DATABASE` string, + `TC_TABLE` string, + `TC_PARTITION` string, + 
`TC_OPERATION_TYPE` string, + `TC_WRITEID` bigint +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"TC_TXNID\", + \"TC_DATABASE\", + \"TC_TABLE\", + \"TC_PARTITION\", + \"TC_OPERATION_TYPE\", + \"TC_WRITEID\" +FROM \"TXN_COMPONENTS\"" +); + + +CREATE OR REPLACE VIEW `TRANSACTIONS` ( + `TXN_ID`, + `STATE`, + `STARTED`, + `LAST_HEARTBEAT`, + `USER`, + `HOST`, + `AGENT_INFO`, + `META_INFO`, + `HEARTBEAT_COUNT`, + `TYPE`, + `TC_DATABASE`, + `TC_TABLE`, + `TC_PARTITION`, + `TC_OPERATION_TYPE`, + `TC_WRITEID` +) AS +SELECT DISTINCT + T.`TXN_ID`, + CASE WHEN T.`TXN_STATE` = 'o' THEN 'open' WHEN T.`TXN_STATE` = 'a' THEN 'aborted' WHEN T.`TXN_STATE` = 'c' THEN 'commited' ELSE 'UNKNOWN' END AS TXN_STATE, + FROM_UNIXTIME(T.`TXN_STARTED` DIV 1000) AS TXN_STARTED, + FROM_UNIXTIME(T.`TXN_LAST_HEARTBEAT` DIV 1000) AS TXN_LAST_HEARTBEAT, + T.`TXN_USER`, + T.`TXN_HOST`, + T.`TXN_AGENT_INFO`, + T.`TXN_META_INFO`, + T.`TXN_HEARTBEAT_COUNT`, + CASE WHEN T.`TXN_TYPE` = 0 THEN 'DEFAULT' WHEN T.`TXN_TYPE` = 1 THEN 'REPL_CREATED' WHEN T.`TXN_TYPE` = 2 THEN 'READ_ONLY' WHEN T.`TXN_TYPE` = 3 THEN 'COMPACTION' END AS TXN_TYPE, + TC.`TC_DATABASE`, + TC.`TC_TABLE`, + TC.`TC_PARTITION`, + CASE WHEN TC.`TC_OPERATION_TYPE` = 's' THEN 'SELECT' WHEN TC.`TC_OPERATION_TYPE` = 'i' THEN 'INSERT' WHEN TC.`TC_OPERATION_TYPE` = 'u' THEN 'UPDATE' WHEN TC.`TC_OPERATION_TYPE` = 'c' THEN 'COMPACT' END AS OPERATION_TYPE, + TC.`TC_WRITEID` +FROM `SYS`.`TXNS` AS T +LEFT JOIN `SYS`.`TXN_COMPONENTS` AS TC ON T.`TXN_ID` = TC.`TC_TXNID`; + +CREATE EXTERNAL TABLE `HIVE_LOCKS` ( + `HL_LOCK_EXT_ID` bigint, + `HL_LOCK_INT_ID` bigint, + `HL_TXNID` bigint, + `HL_DB` string, + `HL_TABLE` string, + `HL_PARTITION` string, + `HL_LOCK_STATE` string, + `HL_LOCK_TYPE` string, + `HL_LAST_HEARTBEAT` bigint, + `HL_ACQUIRED_AT` bigint, + `HL_USER` string, + `HL_HOST` string, + `HL_HEARTBEAT_COUNT` int, + `HL_AGENT_INFO` string, + `HL_BLOCKEDBY_EXT_ID` bigint, + `HL_BLOCKEDBY_INT_ID` bigint +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"HL_LOCK_EXT_ID\", + \"HL_LOCK_INT_ID\", + \"HL_TXNID\", + \"HL_DB\", + \"HL_TABLE\", + \"HL_PARTITION\", + \"HL_LOCK_STATE\", + \"HL_LOCK_TYPE\", + \"HL_LAST_HEARTBEAT\", + \"HL_ACQUIRED_AT\", + \"HL_USER\", + \"HL_HOST\", + \"HL_HEARTBEAT_COUNT\", + \"HL_AGENT_INFO\", + \"HL_BLOCKEDBY_EXT_ID\", + \"HL_BLOCKEDBY_INT_ID\" +FROM \"HIVE_LOCKS\"" +); + +CREATE OR REPLACE VIEW `LOCKS` ( + `LOCK_EXT_ID`, + `LOCK_INT_ID`, + `TXNID`, + `DB`, + `TABLE`, + `PARTITION`, + `LOCK_STATE`, + `LOCK_TYPE`, + `LAST_HEARTBEAT`, + `ACQUIRED_AT`, + `USER`, + `HOST`, + `HEARTBEAT_COUNT`, + `AGENT_INFO`, + `BLOCKEDBY_EXT_ID`, + `BLOCKEDBY_INT_ID` +) AS +SELECT DISTINCT + HL.`HL_LOCK_EXT_ID`, + HL.`HL_LOCK_INT_ID`, + HL.`HL_TXNID`, + HL.`HL_DB`, + HL.`HL_TABLE`, + HL.`HL_PARTITION`, + CASE WHEN HL.`HL_LOCK_STATE` = 'a' THEN 'acquired' WHEN HL.`HL_LOCK_STATE` = 'w' THEN 'waiting' END AS LOCK_STATE, + CASE WHEN HL.`HL_LOCK_TYPE` = 'e' THEN 'exclusive' WHEN HL.`HL_LOCK_TYPE` = 'x' THEN 'excl_write' WHEN HL.`HL_LOCK_TYPE` = 'r' THEN 'shared_read' WHEN HL.`HL_LOCK_TYPE` = 'w' THEN 'shared_write' END AS LOCK_TYPE, + FROM_UNIXTIME(HL.`HL_LAST_HEARTBEAT`), + FROM_UNIXTIME(HL.`HL_ACQUIRED_AT`), + HL.`HL_USER`, + HL.`HL_HOST`, + HL.`HL_HEARTBEAT_COUNT`, + HL.`HL_AGENT_INFO`, + HL.`HL_BLOCKEDBY_EXT_ID`, + HL.`HL_BLOCKEDBY_INT_ID` +FROM 
SYS.`HIVE_LOCKS` AS HL; + +CREATE EXTERNAL TABLE IF NOT EXISTS `MIN_HISTORY_LEVEL` ( + `MHL_TXNID` bigint, + `MHL_MIN_OPEN_TXNID` bigint +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"MHL_TXNID\", + \"MHL_MIN_OPEN_TXNID\", +FROM \"MIN_HISTORY_LEVEL\"" +); + +CREATE EXTERNAL TABLE IF NOT EXISTS `REPLICATION_METRICS_ORIG` ( + `SCHEDULED_EXECUTION_ID` bigint, + `POLICY_NAME` string, + `DUMP_EXECUTION_ID` bigint, + `METADATA` string, + `PROGRESS` string, + `MESSAGE_FORMAT` varchar(16) +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"RM_SCHEDULED_EXECUTION_ID\", + \"RM_POLICY\", + \"RM_DUMP_EXECUTION_ID\", + \"RM_METADATA\", + \"RM_PROGRESS\", + \"MESSAGE_FORMAT\" +FROM \"REPLICATION_METRICS\"" +); + +CREATE OR REPLACE VIEW `REPLICATION_METRICS` ( + `SCHEDULED_EXECUTION_ID`, + `POLICY_NAME`, + `DUMP_EXECUTION_ID`, + `METADATA`, + `PROGRESS` +) AS +SELECT DISTINCT + RM.`SCHEDULED_EXECUTION_ID`, + RM.`POLICY_NAME`, + RM.`DUMP_EXECUTION_ID`, + RM.`METADATA`, + deserialize(RM.`PROGRESS`, RM.`MESSAGE_FORMAT`) +FROM SYS.`REPLICATION_METRICS_ORIG` AS RM; + +CREATE EXTERNAL TABLE IF NOT EXISTS `NOTIFICATION_LOG` ( + `NL_ID` bigint, + `EVENT_ID` bigint, + `EVENT_TIME` int, + `EVENT_TYPE` varchar(32), + `CAT_NAME` varchar(256), + `DB_NAME` varchar(128), + `TBL_NAME` varchar(256), + `MESSAGE` string, + `MESSAGE_FORMAT` varchar(16) +) +STORED BY 'org.apache.hive.storage.jdbc.JdbcStorageHandler' +TBLPROPERTIES ( +"hive.sql.database.type" = "METASTORE", +"hive.sql.query" = +"SELECT + \"NL_ID\", + \"EVENT_ID\", + \"EVENT_TIME\", + \"EVENT_TYPE\", + \"CAT_NAME\", + \"DB_NAME\", + \"TBL_NAME\", + \"MESSAGE\", + \"MESSAGE_FORMAT\" +FROM \"NOTIFICATION_LOG\"" +); + +CREATE OR REPLACE VIEW `REPLICATION_FAILOVER_FAILBACK_METRICS` ( + `DB_NAME`, + `DB_REPL_METRICS_NAME`, + `DB_REPL_METRICS_VALUE` +) AS +SELECT DISTINCT + A.`NAME`, + B.`PARAM_KEY`, + B.`PARAM_VALUE` +FROM SYS.DBS A +JOIN SYS.DATABASE_PARAMS B +WHERE + A.DB_ID=B.DB_ID + AND + B.PARAM_KEY LIKE 'repl_metrics%'; + + +CREATE DATABASE IF NOT EXISTS INFORMATION_SCHEMA; + +USE INFORMATION_SCHEMA; + + +CREATE OR REPLACE VIEW `SCHEMATA` +( + `CATALOG_NAME`, + `SCHEMA_NAME`, + `SCHEMA_OWNER`, + `DEFAULT_CHARACTER_SET_CATALOG`, + `DEFAULT_CHARACTER_SET_SCHEMA`, + `DEFAULT_CHARACTER_SET_NAME`, + `SQL_PATH` +) AS +SELECT DISTINCT + 'default', + D.`NAME`, + D.`OWNER_NAME`, + cast(null as string), + cast(null as string), + cast(null as string), + `DB_LOCATION_URI` +FROM + `sys`.`DBS` D LEFT JOIN `sys`.`TBLS` T ON (D.`DB_ID` = T.`DB_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND current_authorizer() = P.`AUTHORIZER`; + +CREATE OR REPLACE VIEW `TABLES` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `TABLE_TYPE`, + `SELF_REFERENCING_COLUMN_NAME`, + `REFERENCE_GENERATION`, + `USER_DEFINED_TYPE_CATALOG`, + `USER_DEFINED_TYPE_SCHEMA`, + `USER_DEFINED_TYPE_NAME`, + `IS_INSERTABLE_INTO`, + `IS_TYPED`, + `COMMIT_ACTION` +) AS +SELECT DISTINCT + 'default', + D.NAME, + T.TBL_NAME, + IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'VIEW', 'BASE_TABLE'), + cast(null as string), + 
cast(null as string), + cast(null as string), + cast(null as string), + cast(null as string), + IF(length(T.VIEW_ORIGINAL_TEXT) > 0, 'NO', 'YES'), + 'NO', + cast(null as string) +FROM + `sys`.`TBLS` T JOIN `sys`.`DBS` D ON (D.`DB_ID` = T.`DB_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer(); + +CREATE OR REPLACE VIEW `TABLE_PRIVILEGES` +( + `GRANTOR`, + `GRANTEE`, + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `PRIVILEGE_TYPE`, + `IS_GRANTABLE`, + `WITH_HIERARCHY` +) AS +SELECT DISTINCT + P.`GRANTOR`, + P.`PRINCIPAL_NAME`, + 'default', + D.`NAME`, + T.`TBL_NAME`, + P.`TBL_PRIV`, + IF (P.`GRANT_OPTION` == 0, 'NO', 'YES'), + 'NO' +FROM + `sys`.`TBL_PRIVS` P JOIN `sys`.`TBLS` T ON (P.`TBL_ID` = T.`TBL_ID`) + JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P2 ON (P.`TBL_ID` = P2.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR + (P2.`TBL_ID` IS NOT NULL AND P.`PRINCIPAL_NAME` = P2.`PRINCIPAL_NAME` AND P.`PRINCIPAL_TYPE` = P2.`PRINCIPAL_TYPE` + AND (P2.`PRINCIPAL_NAME`=current_user() AND P2.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P2.`PRINCIPAL_NAME`) OR P2.`PRINCIPAL_NAME` = 'public') AND P2.`PRINCIPAL_TYPE`='GROUP')) + AND P2.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER` = current_authorizer() AND P2.`AUTHORIZER` = current_authorizer()); + +CREATE OR REPLACE VIEW `COLUMNS` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `COLUMN_NAME`, + `ORDINAL_POSITION`, + `COLUMN_DEFAULT`, + `IS_NULLABLE`, + `DATA_TYPE`, + `CHARACTER_MAXIMUM_LENGTH`, + `CHARACTER_OCTET_LENGTH`, + `NUMERIC_PRECISION`, + `NUMERIC_PRECISION_RADIX`, + `NUMERIC_SCALE`, + `DATETIME_PRECISION`, + `INTERVAL_TYPE`, + `INTERVAL_PRECISION`, + `CHARACTER_SET_CATALOG`, + `CHARACTER_SET_SCHEMA`, + `CHARACTER_SET_NAME`, + `COLLATION_CATALOG`, + `COLLATION_SCHEMA`, + `COLLATION_NAME`, + `UDT_CATALOG`, + `UDT_SCHEMA`, + `UDT_NAME`, + `SCOPE_CATALOG`, + `SCOPE_SCHEMA`, + `SCOPE_NAME`, + `MAXIMUM_CARDINALITY`, + `DTD_IDENTIFIER`, + `IS_SELF_REFERENCING`, + `IS_IDENTITY`, + `IDENTITY_GENERATION`, + `IDENTITY_START`, + `IDENTITY_INCREMENT`, + `IDENTITY_MAXIMUM`, + `IDENTITY_MINIMUM`, + `IDENTITY_CYCLE`, + `IS_GENERATED`, + `GENERATION_EXPRESSION`, + `IS_SYSTEM_TIME_PERIOD_START`, + `IS_SYSTEM_TIME_PERIOD_END`, + `SYSTEM_TIME_PERIOD_TIMESTAMP_GENERATION`, + `IS_UPDATABLE`, + `DECLARED_DATA_TYPE`, + `DECLARED_NUMERIC_PRECISION`, + `DECLARED_NUMERIC_SCALE` +) AS +SELECT DISTINCT + 'default', + D.NAME, + T.TBL_NAME, + C.COLUMN_NAME, + C.INTEGER_IDX, + cast (null as string), + 'YES', + C.TYPE_NAME as TYPE_NAME, + CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) like 'varchar%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^VARCHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + WHEN lower(C.TYPE_NAME) like 'char%' THEN cast(regexp_extract(upper(C.TYPE_NAME), '^CHAR\\s*\\((\\d+)\\s*\\)$', 1) as int) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 + WHEN lower(C.TYPE_NAME) = 
'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 + WHEN lower(C.TYPE_NAME) = 'float' THEN 23 + WHEN lower(C.TYPE_NAME) = 'double' THEN 53 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'float' THEN 2 + WHEN lower(C.TYPE_NAME) = 'double' THEN 2 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+),(\\d+)',2) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+),(\\d+)',2) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'date' THEN 0 + WHEN lower(C.TYPE_NAME) = 'timestamp' THEN 9 + ELSE null END, + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + C.CD_ID, + 'NO', + 'NO', + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + cast (null as string), + 'NEVER', + cast (null as string), + 'NO', + 'NO', + cast (null as string), + 'YES', + C.TYPE_NAME as DECLARED_DATA_TYPE, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 19 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 5 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 3 + WHEN lower(C.TYPE_NAME) = 'float' THEN 23 + WHEN lower(C.TYPE_NAME) = 'double' THEN 53 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN regexp_extract(upper(C.TYPE_NAME), '^DECIMAL\\s*\\((\\d+)',1) + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN regexp_extract(upper(C.TYPE_NAME), '^NUMERIC\\s*\\((\\d+)',1) + ELSE null END, + CASE WHEN lower(C.TYPE_NAME) = 'bigint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'int' THEN 10 + WHEN lower(C.TYPE_NAME) = 'smallint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'tinyint' THEN 10 + WHEN lower(C.TYPE_NAME) = 'float' THEN 2 + WHEN lower(C.TYPE_NAME) = 'double' THEN 2 + WHEN lower(C.TYPE_NAME) like 'decimal%' THEN 10 + WHEN lower(C.TYPE_NAME) like 'numeric%' THEN 10 + ELSE null END +FROM + `sys`.`COLUMNS_V2` C JOIN `sys`.`SDS` S ON (C.`CD_ID` = S.`CD_ID`) + JOIN `sys`.`TBLS` T ON (S.`SD_ID` = T.`SD_ID`) + JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`) + LEFT JOIN `sys`.`TBL_COL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND C.`COLUMN_NAME` = P.`COLUMN_NAME` + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND array_contains(split_map_privs(P.`TBL_COL_PRIV`),"SELECT") AND P.`AUTHORIZER`=current_authorizer(); + + +CREATE OR REPLACE VIEW `COLUMN_PRIVILEGES` +( + `GRANTOR`, + `GRANTEE`, + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `COLUMN_NAME`, + 
`PRIVILEGE_TYPE`, + `IS_GRANTABLE` +) AS +SELECT DISTINCT + P.`GRANTOR`, + P.`PRINCIPAL_NAME`, + 'default', + D.`NAME`, + T.`TBL_NAME`, + P.`COLUMN_NAME`, + P.`TBL_COL_PRIV`, + IF (P.`GRANT_OPTION` == 0, 'NO', 'YES') +FROM + (SELECT + Q.`GRANTOR`, + Q.`GRANT_OPTION`, + Q.`PRINCIPAL_NAME`, + Q.`PRINCIPAL_TYPE`, + Q.`AUTHORIZER`, + Q.`COLUMN_NAME`, + `TBL_COL_PRIV_TMP`.`TBL_COL_PRIV`, + Q.`TBL_ID` + FROM `sys`.`TBL_COL_PRIVS` AS Q + LATERAL VIEW explode(split_map_privs(Q.`TBL_COL_PRIV`)) `TBL_COL_PRIV_TMP` AS `TBL_COL_PRIV`) P + JOIN `sys`.`TBLS` T ON (P.`TBL_ID` = T.`TBL_ID`) + JOIN `sys`.`DBS` D ON (T.`DB_ID` = D.`DB_ID`) + JOIN `sys`.`SDS` S ON (S.`SD_ID` = T.`SD_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P2 ON (P.`TBL_ID` = P2.`TBL_ID`) +WHERE + NOT restrict_information_schema() OR P2.`TBL_ID` IS NOT NULL + AND P.`PRINCIPAL_NAME` = P2.`PRINCIPAL_NAME` AND P.`PRINCIPAL_TYPE` = P2.`PRINCIPAL_TYPE` + AND (P2.`PRINCIPAL_NAME`=current_user() AND P2.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P2.`PRINCIPAL_NAME`) OR P2.`PRINCIPAL_NAME` = 'public') AND P2.`PRINCIPAL_TYPE`='GROUP')) + AND P2.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer() AND P2.`AUTHORIZER`=current_authorizer(); + +CREATE OR REPLACE VIEW `VIEWS` +( + `TABLE_CATALOG`, + `TABLE_SCHEMA`, + `TABLE_NAME`, + `VIEW_DEFINITION`, + `CHECK_OPTION`, + `IS_UPDATABLE`, + `IS_INSERTABLE_INTO`, + `IS_TRIGGER_UPDATABLE`, + `IS_TRIGGER_DELETABLE`, + `IS_TRIGGER_INSERTABLE_INTO` +) AS +SELECT DISTINCT + 'default', + D.NAME, + T.TBL_NAME, + T.VIEW_ORIGINAL_TEXT, + CAST(NULL as string), + false, + false, + false, + false, + false +FROM + `sys`.`DBS` D JOIN `sys`.`TBLS` T ON (D.`DB_ID` = T.`DB_ID`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + length(T.VIEW_ORIGINAL_TEXT) > 0 + AND (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()); + +CREATE OR REPLACE VIEW `COMPACTIONS` +( + `C_ID`, + `C_CATALOG`, + `C_DATABASE`, + `C_TABLE`, + `C_PARTITION`, + `C_TYPE`, + `C_STATE`, + `C_WORKER_HOST`, + `C_WORKER_ID`, + `C_WORKER_VERSION`, + `C_ENQUEUE_TIME`, + `C_START`, + `C_DURATION`, + `C_HADOOP_JOB_ID`, + `C_RUN_AS`, + `C_ERROR_MESSAGE`, + `C_NEXT_TXN_ID`, + `C_TXN_ID`, + `C_COMMIT_TIME`, + `C_HIGHEST_WRITE_ID`, + `C_INITIATOR_HOST`, + `C_INITIATOR_ID`, + `C_INITIATOR_VERSION`, + `C_CLEANER_START`, + `C_POOL_NAME`, + `C_NUMBER_OF_BUCKETS`, + `C_TBLPROPERTIES` +) AS +SELECT DISTINCT + C_ID, + C_CATALOG, + C_DATABASE, + C_TABLE, + C_PARTITION, + C_TYPE, + C_STATE, + C_WORKER_HOST, + C_WORKER_ID, + C_WORKER_VERSION, + C_ENQUEUE_TIME, + C_START, + C_DURATION, + C_HADOOP_JOB_ID, + C_RUN_AS, + C_ERROR_MESSAGE, + C_NEXT_TXN_ID, + C_TXN_ID, + C_COMMIT_TIME, + C_HIGHEST_WRITE_ID, + C_INITIATOR_HOST, + C_INITIATOR_ID, + C_INITIATOR_VERSION, + C_CLEANER_START, + C_POOL_NAME, + C_NUMBER_OF_BUCKETS, + C_TBLPROPERTIES +FROM + `sys`.`COMPACTIONS` C JOIN `sys`.`TBLS` T ON (C.`C_TABLE` = T.`TBL_NAME`) + JOIN `sys`.`DBS` D ON (C.`C_DATABASE` = D.`NAME`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND 
P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()); + + +create or replace view SCHEDULED_QUERIES as +select + `SCHEDULED_QUERY_ID` , + `SCHEDULE_NAME` , + `ENABLED`, + `CLUSTER_NAMESPACE`, + `SCHEDULE`, + `USER`, + `QUERY`, + FROM_UNIXTIME(NEXT_EXECUTION) as NEXT_EXECUTION, + `ACTIVE_EXECUTION_ID` +FROM + SYS.SCHEDULED_QUERIES +; + +create or replace view SCHEDULED_EXECUTIONS as +SELECT + SCHEDULED_EXECUTION_ID, + SCHEDULE_NAME, + EXECUTOR_QUERY_ID, + `STATE`, + FROM_UNIXTIME(START_TIME) as START_TIME, + FROM_UNIXTIME(END_TIME) as END_TIME, + END_TIME-START_TIME as ELAPSED, + ERROR_MESSAGE, + FROM_UNIXTIME(LAST_UPDATE_TIME) AS LAST_UPDATE_TIME, + START_TIME as START_TIME_SINCE_EPOCH +FROM + SYS.SCHEDULED_EXECUTIONS SE +JOIN + SYS.SCHEDULED_QUERIES SQ +WHERE + SE.SCHEDULED_QUERY_ID=SQ.SCHEDULED_QUERY_ID; + +CREATE OR REPLACE VIEW `TRANSACTIONS` ( + `TXN_ID`, + `STATE`, + `STARTED`, + `LAST_HEARTBEAT`, + `USER`, + `HOST`, + `AGENT_INFO`, + `META_INFO`, + `HEARTBEAT_COUNT`, + `TYPE`, + `TC_DATABASE`, + `TC_TABLE`, + `TC_PARTITION`, + `TC_OPERATION_TYPE`, + `TC_WRITEID` +) AS +SELECT DISTINCT + `TXN_ID`, + `STATE`, + `STARTED`, + `LAST_HEARTBEAT`, + `USER`, + `HOST`, + `AGENT_INFO`, + `META_INFO`, + `HEARTBEAT_COUNT`, + `TYPE`, + `TC_DATABASE`, + `TC_TABLE`, + `TC_PARTITION`, + `TC_OPERATION_TYPE`, + `TC_WRITEID` +FROM `SYS`.`TRANSACTIONS` AS TXN JOIN `sys`.`TBLS` T ON (TXN.`TC_TABLE` = T.`TBL_NAME`) + JOIN `sys`.`DBS` D ON (TXN.`TC_DATABASE` = D.`NAME`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()); + +CREATE OR REPLACE VIEW `LOCKS` ( + `LOCK_EXT_ID`, + `LOCK_INT_ID`, + `TXNID`, + `DB`, + `TABLE`, + `PARTITION`, + `LOCK_STATE`, + `LOCK_TYPE`, + `LAST_HEARTBEAT`, + `ACQUIRED_AT`, + `USER`, + `HOST`, + `HEARTBEAT_COUNT`, + `AGENT_INFO`, + `BLOCKEDBY_EXT_ID`, + `BLOCKEDBY_INT_ID` +) AS +SELECT DISTINCT + `LOCK_EXT_ID`, + `LOCK_INT_ID`, + `TXNID`, + `DB`, + `TABLE`, + `PARTITION`, + `LOCK_STATE`, + `LOCK_TYPE`, + `LAST_HEARTBEAT`, + `ACQUIRED_AT`, + `USER`, + `HOST`, + `HEARTBEAT_COUNT`, + `AGENT_INFO`, + `BLOCKEDBY_EXT_ID`, + `BLOCKEDBY_INT_ID` +FROM SYS.`LOCKS` AS L JOIN `sys`.`TBLS` T ON (L.`TABLE` = T.`TBL_NAME`) + JOIN `sys`.`DBS` D ON (L.`DB` = D.`NAME`) + LEFT JOIN `sys`.`TBL_PRIVS` P ON (T.`TBL_ID` = P.`TBL_ID`) +WHERE + (NOT restrict_information_schema() OR P.`TBL_ID` IS NOT NULL + AND (P.`PRINCIPAL_NAME`=current_user() AND P.`PRINCIPAL_TYPE`='USER' + OR ((array_contains(current_groups(), P.`PRINCIPAL_NAME`) OR P.`PRINCIPAL_NAME` = 'public') AND P.`PRINCIPAL_TYPE`='GROUP')) + AND P.`TBL_PRIV`='SELECT' AND P.`AUTHORIZER`=current_authorizer()); diff --git a/metastore/scripts/upgrade/hive/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.hive.sql b/metastore/scripts/upgrade/hive/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.hive.sql deleted file mode 100644 index 4ba19c30dc13..000000000000 --- a/metastore/scripts/upgrade/hive/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.hive.sql +++ /dev/null @@ -1,3 +0,0 @@ -SELECT 'Upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0-beta-2'; - -SELECT 'Finished upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0-beta-2'; diff --git 
a/metastore/scripts/upgrade/hive/upgrade-4.0.0-beta-1-to-4.0.0.hive.sql b/metastore/scripts/upgrade/hive/upgrade-4.0.0-beta-1-to-4.0.0.hive.sql new file mode 100644 index 000000000000..aa2be181b0e1 --- /dev/null +++ b/metastore/scripts/upgrade/hive/upgrade-4.0.0-beta-1-to-4.0.0.hive.sql @@ -0,0 +1,3 @@ +SELECT 'Upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0'; + +SELECT 'Finished upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0'; diff --git a/metastore/scripts/upgrade/hive/upgrade-4.0.0-to-4.1.0.hive.sql b/metastore/scripts/upgrade/hive/upgrade-4.0.0-to-4.1.0.hive.sql new file mode 100644 index 000000000000..801629de198e --- /dev/null +++ b/metastore/scripts/upgrade/hive/upgrade-4.0.0-to-4.1.0.hive.sql @@ -0,0 +1,3 @@ +SELECT 'Upgrading MetaStore schema from 4.0.0 to 4.1.0'; + +SELECT 'Finished upgrading MetaStore schema from 4.0.0 to 4.1.0'; diff --git a/metastore/scripts/upgrade/hive/upgrade.order.hive b/metastore/scripts/upgrade/hive/upgrade.order.hive index 4ab1e7a5411d..d14dcfb07dcf 100644 --- a/metastore/scripts/upgrade/hive/upgrade.order.hive +++ b/metastore/scripts/upgrade/hive/upgrade.order.hive @@ -2,4 +2,5 @@ 3.1.0-to-4.0.0-alpha-1.hive 4.0.0-alpha-1-to-4.0.0-alpha-2.hive 4.0.0-alpha-2-to-4.0.0-beta-1.hive -4.0.0-beta-1-to-4.0.0-beta-2.hive \ No newline at end of file +4.0.0-beta-1-to-4.0.0.hive +4.0.0-to-4.1.0.hive \ No newline at end of file diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveClientCache.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveClientCache.java index b68511d4fa84..7b6f597efbb2 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveClientCache.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveClientCache.java @@ -322,7 +322,7 @@ static class HiveClientCacheKey { final private int threadId; private HiveClientCacheKey(HiveConf hiveConf, final int threadId) throws IOException, LoginException { - this.metaStoreURIs = hiveConf.getVar(HiveConf.ConfVars.METASTOREURIS); + this.metaStoreURIs = hiveConf.getVar(HiveConf.ConfVars.METASTORE_URIS); ugi = Utils.getUGI(); this.hiveConf = hiveConf; this.threadId = threadId; diff --git a/packaging/pom.xml b/packaging/pom.xml index 274dcc7d08b7..6a1f6fbe516b 100644 --- a/packaging/pom.xml +++ b/packaging/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-packaging @@ -158,6 +158,10 @@ \Qhttps://opensource.org/licenses/Apache-2.0\E \Qhttp://www.apache.org/licenses/\E + + https?://(www\.)?opensource.org/licenses/bsd-license.php + https?://(www\.)?opensource.org/licenses/BSD-2-Clause + https?://(www\.)?opensource.org/licenses/BSD-3-Clause diff --git a/packaging/src/docker/README.md b/packaging/src/docker/README.md index c206914ef4d2..e452013aac52 100644 --- a/packaging/src/docker/README.md +++ b/packaging/src/docker/README.md @@ -57,18 +57,19 @@ For a quick start, launch the Metastore with Derby, Everything would be lost when the service is down. 
In order to save the Hive table's schema and data, start the container with an external Postgres and Volume to keep them, ```shell - docker run -d -p 9083:9083 --env SERVICE_NAME=metastore \ - --env DB_DRIVER=postgres \ + docker run -d -p 9083:9083 --env SERVICE_NAME=metastore --env DB_DRIVER=postgres \ --env SERVICE_OPTS="-Djavax.jdo.option.ConnectionDriverName=org.postgresql.Driver -Djavax.jdo.option.ConnectionURL=jdbc:postgresql://postgres:5432/metastore_db -Djavax.jdo.option.ConnectionUserName=hive -Djavax.jdo.option.ConnectionPassword=password" \ --mount source=warehouse,target=/opt/hive/data/warehouse \ + --mount type=bind,source=`mvn help:evaluate -Dexpression=settings.localRepository -q -DforceStdout`/org/postgresql/postgresql/42.5.1/postgresql-42.5.1.jar,target=/opt/hive/lib/postgres.jar \ --name metastore-standalone apache/hive:${HIVE_VERSION} ``` If you want to use your own `hdfs-site.xml` or `yarn-site.xml` for the service, you can provide the environment variable `HIVE_CUSTOM_CONF_DIR` for the command. For instance, put the custom configuration file under the directory `/opt/hive/conf`, then run, ```shell - docker run -d -p 9083:9083 --env SERVICE_NAME=metastore \ - --env DB_DRIVER=postgres -v /opt/hive/conf:/hive_custom_conf --env HIVE_CUSTOM_CONF_DIR=/hive_custom_conf \ + docker run -d -p 9083:9083 --env SERVICE_NAME=metastore --env DB_DRIVER=postgres \ + -v /opt/hive/conf:/hive_custom_conf --env HIVE_CUSTOM_CONF_DIR=/hive_custom_conf \ + --mount type=bind,source=`mvn help:evaluate -Dexpression=settings.localRepository -q -DforceStdout`/org/postgresql/postgresql/42.5.1/postgresql-42.5.1.jar,target=/opt/hive/lib/postgres.jar \ --name metastore apache/hive:${HIVE_VERSION} ``` @@ -81,16 +82,24 @@ then add "--env SCHEMA_COMMAND=upgradeSchema" to the command. 
Launch the HiveServer2 with an embedded Metastore, ```shell - docker run -d -p 10000:10000 -p 10002:10002 --env SERVICE_NAME=hiveserver2 --name hiveserver2-standalone apache/hive:${HIVE_VERSION} + docker run -d -p 10000:10000 -p 10002:10002 --env SERVICE_NAME=hiveserver2 --name hiveserver2-standalone apache/hive:${HIVE_VERSION} ``` or specify a remote Metastore if it's available, ```shell docker run -d -p 10000:10000 -p 10002:10002 --env SERVICE_NAME=hiveserver2 \ --env SERVICE_OPTS="-Dhive.metastore.uris=thrift://metastore:9083" \ --env IS_RESUME="true" \ + --env VERBOSE="true" \ --name hiveserver2-standalone apache/hive:${HIVE_VERSION} ``` + +NOTE: + +To skip schematool initialisation or upgrade of the metastore, use `IS_RESUME="true"`; for verbose logging, set `VERBOSE="true"`. + + To save the data between container restarts, you can start the HiveServer2 with a Volume, + ```shell docker run -d -p 10000:10000 -p 10002:10002 --env SERVICE_NAME=hiveserver2 \ --env SERVICE_OPTS="-Dhive.metastore.uris=thrift://metastore:9083" \ @@ -101,12 +110,24 @@ Launch the HiveServer2 with an embedded Metastore, - HiveServer2, Metastore -To get a quick overview of both HiveServer2 and Metastore, you can run: +To get a quick overview of both HiveServer2 and Metastore, a `docker-compose.yml` is provided under `packaging/src/docker`; +specify the `POSTGRES_LOCAL_PATH` first: +```shell +export POSTGRES_LOCAL_PATH=your_local_path_to_postgres_driver +``` +Example: +```shell +mvn dependency:copy -Dartifact="org.postgresql:postgresql:42.5.1" && \ +export POSTGRES_LOCAL_PATH=`mvn help:evaluate -Dexpression=settings.localRepository -q -DforceStdout`/org/postgresql/postgresql/42.5.1/postgresql-42.5.1.jar +``` +If you don't have Maven installed or have problems resolving the Postgres driver, you can always download the jar yourself and +set `POSTGRES_LOCAL_PATH` to the path of the downloaded jar. +Then, ```shell - cd packaging/src/docker - docker compose up -d - ``` +docker compose up -d +``` +This starts the HiveServer2, Metastore and Postgres services. Volumes are used to persist data generated by Hive inside Postgres and HiveServer2 containers, - hive_db @@ -115,6 +136,11 @@ Volumes are used to persist data generated by Hive inside Postgres and HiveServe The volume stores tables' files inside HiveServer2 container. +To stop/remove them all, +```shell +docker compose down +``` + #### Usage - HiveServer2 web diff --git a/packaging/src/docker/docker-compose.yml b/packaging/src/docker/docker-compose.yml index 6f9560b8952c..d0602f08f5c8 100644 --- a/packaging/src/docker/docker-compose.yml +++ b/packaging/src/docker/docker-compose.yml @@ -32,6 +32,9 @@ services: - '9083:9083' volumes: - warehouse:/opt/hive/data/warehouse + - type: bind + source: ${POSTGRES_LOCAL_PATH} + target: /opt/hive/lib/postgres.jar hiveserver2: image: apache/hive:${HIVE_VERSION} diff --git a/packaging/src/docker/entrypoint.sh b/packaging/src/docker/entrypoint.sh index a19b50d8daad..f22652c2afb6 100644 --- a/packaging/src/docker/entrypoint.sh +++ b/packaging/src/docker/entrypoint.sh @@ -22,13 +22,14 @@ set -x : ${DB_DRIVER:=derby} SKIP_SCHEMA_INIT="${IS_RESUME:-false}" +[[ $VERBOSE = "true" ]] && VERBOSE_MODE="--verbose" || VERBOSE_MODE="" function initialize_hive { COMMAND="-initOrUpgradeSchema" if [ "$(echo "$HIVE_VER" | cut -d '.' 
-f1)" -lt "4" ]; then COMMAND="-${SCHEMA_COMMAND:-initSchema}" fi - $HIVE_HOME/bin/schematool -dbType $DB_DRIVER $COMMAND + $HIVE_HOME/bin/schematool -dbType $DB_DRIVER $COMMAND $VERBOSE_MODE if [ $? -eq 0 ]; then echo "Initialized schema successfully.." else @@ -53,8 +54,8 @@ fi if [ "${SERVICE_NAME}" == "hiveserver2" ]; then export HADOOP_CLASSPATH=$TEZ_HOME/*:$TEZ_HOME/lib/*:$HADOOP_CLASSPATH + exec $HIVE_HOME/bin/hive --skiphadoopversion --skiphbasecp --service $SERVICE_NAME elif [ "${SERVICE_NAME}" == "metastore" ]; then export METASTORE_PORT=${METASTORE_PORT:-9083} + exec $HIVE_HOME/bin/hive --skiphadoopversion --skiphbasecp $VERBOSE_MODE --service $SERVICE_NAME fi - -exec $HIVE_HOME/bin/hive --skiphadoopversion --skiphbasecp --service $SERVICE_NAME diff --git a/packaging/src/main/assembly/src.xml b/packaging/src/main/assembly/src.xml index 6cd94585cbf9..4d4551798ac0 100644 --- a/packaging/src/main/assembly/src.xml +++ b/packaging/src/main/assembly/src.xml @@ -110,7 +110,6 @@ standalone-metastore/pom.xml streaming/**/* testutils/**/* - upgrade-acid/**/* vector-code-gen/**/* kudu-handler/**/* parser/**/* diff --git a/parser/pom.xml b/parser/pom.xml index e9dddbf00bc7..8bbd7d794952 100644 --- a/parser/pom.xml +++ b/parser/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-parser diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/AlterClauseParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/AlterClauseParser.g index 8e8ec4e33f99..873a28eb05cd 100644 --- a/parser/src/java/org/apache/hadoop/hive/ql/parse/AlterClauseParser.g +++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/AlterClauseParser.g @@ -52,6 +52,7 @@ alterStatement | KW_ALTER KW_MATERIALIZED KW_VIEW tableNameTree=tableName alterMaterializedViewStatementSuffix[$tableNameTree.tree] -> alterMaterializedViewStatementSuffix | KW_ALTER (KW_DATABASE|KW_SCHEMA) alterDatabaseStatementSuffix -> alterDatabaseStatementSuffix | KW_ALTER KW_DATACONNECTOR alterDataConnectorStatementSuffix -> alterDataConnectorStatementSuffix + | KW_OPTIMIZE KW_TABLE tableName optimizeTableStatementSuffix -> ^(TOK_ALTERTABLE tableName optimizeTableStatementSuffix) ; alterTableStatementSuffix @@ -99,6 +100,19 @@ alterTblPartitionStatementSuffix[boolean partition] | alterStatementSuffixAddCol | alterStatementSuffixUpdateColumns ; + +optimizeTableStatementSuffix +@init { gParent.pushMsg("optimize table statement suffix", state); } +@after { gParent.popMsg(state); } + : optimizeTblRewriteDataSuffix + ; + +optimizeTblRewriteDataSuffix +@init { gParent.msgs.push("compaction request"); } +@after { gParent.msgs.pop(); } + : KW_REWRITE KW_DATA + -> ^(TOK_ALTERTABLE_COMPACT Identifier["'MAJOR'"] TOK_BLOCKING) + ; alterStatementPartitionKeyType @init {gParent.msgs.push("alter partition key type"); } diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexerParent.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexerParent.g index a26d66d214d6..078cd561b1b4 100644 --- a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexerParent.g +++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveLexerParent.g @@ -402,6 +402,7 @@ KW_CHERRY_PICK: 'CHERRY-PICK'; KW_ORPHAN_FILES: 'ORPHAN-FILES'; KW_OLDER: 'OLDER'; KW_THAN: 'THAN'; +KW_OPTIMIZE: 'OPTIMIZE'; // Operators // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work. 
diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index 586ea34af3ed..d9cdac0c0d19 100644 --- a/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -594,6 +594,7 @@ import org.apache.hadoop.hive.conf.HiveConf; xlateMap.put("KW_EXCEPT", "EXCEPT"); xlateMap.put("KW_LOAD", "LOAD"); xlateMap.put("KW_DATA", "DATA"); + xlateMap.put("KW_OPTIMIZE", "OPTIMIZE"); xlateMap.put("KW_INPATH", "INPATH"); xlateMap.put("KW_IS", "IS"); xlateMap.put("KW_NULL", "NULL"); diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g index 31b63e66345b..37b7c4b0ee00 100644 --- a/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g +++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g @@ -327,7 +327,7 @@ castExpression LPAREN expression KW_AS - toType=primitiveType + toType=type (fmt=KW_FORMAT StringLiteral)? RPAREN // simple cast @@ -987,6 +987,7 @@ nonReserved | KW_BRANCH | KW_SNAPSHOTS | KW_RETAIN | KW_RETENTION | KW_TAG | KW_FAST_FORWARD + | KW_OPTIMIZE ; //The following SQL2011 reserved keywords are used as function name only, but not as identifiers. diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java b/parser/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java index 7e54bdf95d5c..c99895756d0d 100644 --- a/parser/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java +++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/ParseDriver.java @@ -122,7 +122,7 @@ public ParseResult parse(String command, Configuration configuration) try { r = parser.statement(); } catch (RecognitionException e) { - throw new ParseException(parser.errors); + throw new ParseException(parser.errors, e); } if (lexer.getErrors().size() == 0 && parser.errors.size() == 0) { @@ -152,7 +152,7 @@ public ASTNode parseHint(String command) throws ParseException { try { r = parser.hint(); } catch (RecognitionException e) { - throw new ParseException(parser.errors); + throw new ParseException(parser.errors, e); } if (lexer.getErrors().size() == 0 && parser.errors.size() == 0) { @@ -191,7 +191,7 @@ public ParseResult parseSelect(String command, Configuration configuration) thro try { r = parser.selectClause(); } catch (RecognitionException e) { - throw new ParseException(parser.errors); + throw new ParseException(parser.errors, e); } if (lexer.getErrors().size() == 0 && parser.errors.size() == 0) { @@ -215,7 +215,7 @@ public ASTNode parseExpression(String command) throws ParseException { try { r = parser.expression(); } catch (RecognitionException e) { - throw new ParseException(parser.errors); + throw new ParseException(parser.errors, e); } if (lexer.getErrors().size() == 0 && parser.errors.size() == 0) { @@ -238,7 +238,7 @@ public ASTNode parseTriggerExpression(String command) throws ParseException { try { r = parser.triggerExpressionStandalone(); } catch (RecognitionException e) { - throw new ParseException(parser.errors); + throw new ParseException(parser.errors, e); } if (lexer.getErrors().size() != 0) { throw new ParseException(lexer.getErrors()); @@ -258,7 +258,7 @@ public ASTNode parseTriggerActionExpression(String command) throws ParseExceptio try { r = parser.triggerActionExpressionStandalone(); } catch (RecognitionException e) { - throw new ParseException(parser.errors); + throw new ParseException(parser.errors, e); } 
if (lexer.getErrors().size() != 0) { throw new ParseException(lexer.getErrors()); diff --git a/parser/src/java/org/apache/hadoop/hive/ql/parse/ParseException.java b/parser/src/java/org/apache/hadoop/hive/ql/parse/ParseException.java index 7d945adf0d38..5b2d17a19e71 100644 --- a/parser/src/java/org/apache/hadoop/hive/ql/parse/ParseException.java +++ b/parser/src/java/org/apache/hadoop/hive/ql/parse/ParseException.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hive.ql.parse; -import java.util.ArrayList; - /** * ParseException. * @@ -27,13 +25,18 @@ public class ParseException extends Exception { private static final long serialVersionUID = 1L; - ArrayList errors; + private final Iterable errors; - public ParseException(ArrayList errors) { + public ParseException(Iterable errors) { super(); this.errors = errors; } + public ParseException(Iterable errors, Throwable cause) { + super(cause); + this.errors = errors; + } + @Override public String getMessage() { diff --git a/pom.xml b/pom.xml index 376c698d181a..08842634353a 100644 --- a/pom.xml +++ b/pom.xml @@ -21,7 +21,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT pom Hive https://hive.apache.org @@ -58,12 +58,11 @@ testutils packaging standalone-metastore - upgrade-acid kafka-handler - 4.0.0-beta-2-SNAPSHOT - 4.0.0-beta-2 + 4.1.0-SNAPSHOT + 4.1.0 1.8 1.8 @@ -100,7 +99,7 @@ 3.5.0 3.0.0-M4 2.7.10 - 2.1.0 + 2.3.0 1.10.1 1.10.13 @@ -112,7 +111,7 @@ 12.0.0 1.12.0 1.11.3 - 1.68 + 1.77 1.25.0 5.2.8 5.2.10 @@ -151,8 +150,8 @@ 4.5.13 4.4.13 - 2.5.1 - 2.13.5 + 2.5.2 + 2.16.1 2.3.4 2.4.1 3.1.0 @@ -194,12 +193,12 @@ 1.13.1 0.16.0 1.5.6 - 3.21.7 + 3.24.4 1.0.1 1.7.30 4.0.4 - 4.0.0-beta-2-SNAPSHOT - 0.10.2 + 4.1.0-SNAPSHOT + 0.10.3 2.2.0 1.1 1.1.10.4 @@ -207,7 +206,7 @@ 2.3 2.12.2 2.3.4 - 3.7.2 + 3.8.3 1.1 2.4.0 5.2.0 @@ -216,7 +215,7 @@ 0.10.5 1.2 2.0.1 - 2.8.0 + 2.9.0 3.0.11 1.1.0-incubating 4.0.3 @@ -224,7 +223,8 @@ 1.0.1 1.12.499 2.4.0 - 5.2.24.RELEASE + + 5.3.21 @@ -685,6 +685,14 @@ zookeeper ${zookeeper.version} + + ch.qos.logback + logback-classic + + + ch.qos.logback + logback-core + org.slf4j slf4j-log4j12 @@ -880,8 +888,8 @@ org.bouncycastle - bcprov-jdk15on - ${bcprov-jdk15on.version} + bcprov-jdk18on + ${bcprov-jdk18on.version} org.apache.santuario diff --git a/ql/pom.xml b/ql/pom.xml index 30bd3e310359..5340341b5132 100644 --- a/ql/pom.xml +++ b/ql/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-exec @@ -26,7 +26,7 @@ .. 
0.10.2 - 2.1.0 + 2.3.0 @@ -372,6 +372,16 @@ org.apache.zookeeper zookeeper + + + ch.qos.logback + logback-classic + + + ch.qos.logback + logback-core + + org.apache.curator diff --git a/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/hooks/proto/HiveHookEvents.java b/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/hooks/proto/HiveHookEvents.java index 3f6bef80a8a2..1f27626ce495 100644 --- a/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/hooks/proto/HiveHookEvents.java +++ b/ql/src/gen/protobuf/gen-java/org/apache/hadoop/hive/ql/hooks/proto/HiveHookEvents.java @@ -5,49 +5,38 @@ public final class HiveHookEvents { private HiveHookEvents() {} - public static void registerAllExtensions( - com.google.protobuf.ExtensionRegistryLite registry) { - } - public static void registerAllExtensions( com.google.protobuf.ExtensionRegistry registry) { - registerAllExtensions( - (com.google.protobuf.ExtensionRegistryLite) registry); } - public interface MapFieldEntryOrBuilder extends - // @@protoc_insertion_point(interface_extends:MapFieldEntry) - com.google.protobuf.MessageOrBuilder { + public interface MapFieldEntryOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional string key = 1; /** * optional string key = 1; - * @return Whether the key field is set. */ boolean hasKey(); /** * optional string key = 1; - * @return The key. */ java.lang.String getKey(); /** * optional string key = 1; - * @return The bytes for key. */ com.google.protobuf.ByteString getKeyBytes(); + // optional string value = 2; /** * optional string value = 2; - * @return Whether the value field is set. */ boolean hasValue(); /** * optional string value = 2; - * @return The value. */ java.lang.String getValue(); /** * optional string value = 2; - * @return The bytes for value. */ com.google.protobuf.ByteString getValueBytes(); @@ -56,39 +45,35 @@ public interface MapFieldEntryOrBuilder extends * Protobuf type {@code MapFieldEntry} */ public static final class MapFieldEntry extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:MapFieldEntry) - MapFieldEntryOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements MapFieldEntryOrBuilder { // Use MapFieldEntry.newBuilder() to construct. 
- private MapFieldEntry(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private MapFieldEntry(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private MapFieldEntry() { - key_ = ""; - value_ = ""; + private MapFieldEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MapFieldEntry defaultInstance; + public static MapFieldEntry getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new MapFieldEntry(); + public MapFieldEntry getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private MapFieldEntry( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -100,34 +85,30 @@ private MapFieldEntry( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - key_ = bs; + key_ = input.readBytes(); break; } case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; - value_ = bs; - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } + value_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -138,30 +119,41 @@ private MapFieldEntry( return org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.internal_static_MapFieldEntry_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.internal_static_MapFieldEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry.class, org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MapFieldEntry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MapFieldEntry(input, extensionRegistry); 
+ } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional string key = 1; public static final int KEY_FIELD_NUMBER = 1; - private volatile java.lang.Object key_; + private java.lang.Object key_; /** * optional string key = 1; - * @return Whether the key field is set. */ - @java.lang.Override public boolean hasKey() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string key = 1; - * @return The key. */ - @java.lang.Override public java.lang.String getKey() { java.lang.Object ref = key_; if (ref instanceof java.lang.String) { @@ -178,9 +170,7 @@ public java.lang.String getKey() { } /** * optional string key = 1; - * @return The bytes for key. */ - @java.lang.Override public com.google.protobuf.ByteString getKeyBytes() { java.lang.Object ref = key_; @@ -195,21 +185,18 @@ public java.lang.String getKey() { } } + // optional string value = 2; public static final int VALUE_FIELD_NUMBER = 2; - private volatile java.lang.Object value_; + private java.lang.Object value_; /** * optional string value = 2; - * @return Whether the value field is set. */ - @java.lang.Override public boolean hasValue() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string value = 2; - * @return The value. */ - @java.lang.Override public java.lang.String getValue() { java.lang.Object ref = value_; if (ref instanceof java.lang.String) { @@ -226,9 +213,7 @@ public java.lang.String getValue() { } /** * optional string value = 2; - * @return The bytes for value. */ - @java.lang.Override public com.google.protobuf.ByteString getValueBytes() { java.lang.Object ref = value_; @@ -243,101 +228,57 @@ public java.lang.String getValue() { } } + private void initFields() { + key_ = ""; + value_ = ""; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, key_); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getKeyBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, value_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getValueBytes()); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, key_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getKeyBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, value_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + 
.computeBytesSize(2, getValueBytes()); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry)) { - return super.equals(obj); - } - org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry other = (org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry) obj; - - if (hasKey() != other.hasKey()) return false; - if (hasKey()) { - if (!getKey() - .equals(other.getKey())) return false; - } - if (hasValue() != other.hasValue()) return false; - if (hasValue()) { - if (!getValue() - .equals(other.getValue())) return false; - } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasKey()) { - hash = (37 * hash) + KEY_FIELD_NUMBER; - hash = (53 * hash) + getKey().hashCode(); - } - if (hasValue()) { - hash = (37 * hash) + VALUE_FIELD_NUMBER; - hash = (53 * hash) + getValue().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); } - public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -361,59 +302,46 @@ public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry } public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -421,16 +349,14 @@ protected Builder newBuilderForType( * Protobuf type {@code MapFieldEntry} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:MapFieldEntry) - org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntryOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntryOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.internal_static_MapFieldEntry_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.internal_static_MapFieldEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -443,16 +369,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); key_ = ""; @@ -462,18 +390,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public 
com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.internal_static_MapFieldEntry_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry getDefaultInstanceForType() { return org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry build() { org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry result = buildPartial(); if (!result.isInitialized()) { @@ -482,16 +411,15 @@ public org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry build( return result; } - @java.lang.Override public org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry buildPartial() { org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry result = new org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.key_ = key_; - if (((from_bitField0_ & 0x00000002) != 0)) { + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.value_ = value_; @@ -500,39 +428,6 @@ public org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry buildP return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry) { return mergeFrom((org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry)other); @@ -554,17 +449,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.Ma value_ = other.value_; onChanged(); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -574,7 +466,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; 
} finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -584,27 +476,23 @@ public Builder mergeFrom( } private int bitField0_; + // optional string key = 1; private java.lang.Object key_ = ""; /** * optional string key = 1; - * @return Whether the key field is set. */ public boolean hasKey() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string key = 1; - * @return The key. */ public java.lang.String getKey() { java.lang.Object ref = key_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - key_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + key_ = s; return s; } else { return (java.lang.String) ref; @@ -612,7 +500,6 @@ public java.lang.String getKey() { } /** * optional string key = 1; - * @return The bytes for key. */ public com.google.protobuf.ByteString getKeyBytes() { @@ -629,8 +516,6 @@ public java.lang.String getKey() { } /** * optional string key = 1; - * @param value The key to set. - * @return This builder for chaining. */ public Builder setKey( java.lang.String value) { @@ -644,7 +529,6 @@ public Builder setKey( } /** * optional string key = 1; - * @return This builder for chaining. */ public Builder clearKey() { bitField0_ = (bitField0_ & ~0x00000001); @@ -654,8 +538,6 @@ public Builder clearKey() { } /** * optional string key = 1; - * @param value The bytes for key to set. - * @return This builder for chaining. */ public Builder setKeyBytes( com.google.protobuf.ByteString value) { @@ -668,27 +550,23 @@ public Builder setKeyBytes( return this; } + // optional string value = 2; private java.lang.Object value_ = ""; /** * optional string value = 2; - * @return Whether the value field is set. */ public boolean hasValue() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string value = 2; - * @return The value. */ public java.lang.String getValue() { java.lang.Object ref = value_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - value_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + value_ = s; return s; } else { return (java.lang.String) ref; @@ -696,7 +574,6 @@ public java.lang.String getValue() { } /** * optional string value = 2; - * @return The bytes for value. */ public com.google.protobuf.ByteString getValueBytes() { @@ -713,8 +590,6 @@ public java.lang.String getValue() { } /** * optional string value = 2; - * @param value The value to set. - * @return This builder for chaining. */ public Builder setValue( java.lang.String value) { @@ -728,7 +603,6 @@ public Builder setValue( } /** * optional string value = 2; - * @return This builder for chaining. */ public Builder clearValue() { bitField0_ = (bitField0_ & ~0x00000002); @@ -738,8 +612,6 @@ public Builder clearValue() { } /** * optional string value = 2; - * @param value The bytes for value to set. - * @return This builder for chaining. 
*/ public Builder setValueBytes( com.google.protobuf.ByteString value) { @@ -751,243 +623,177 @@ public Builder setValueBytes( onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:MapFieldEntry) } - // @@protoc_insertion_point(class_scope:MapFieldEntry) - private static final org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry(); - } - - public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public MapFieldEntry parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new MapFieldEntry(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new MapFieldEntry(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:MapFieldEntry) } - public interface HiveHookEventProtoOrBuilder extends - // @@protoc_insertion_point(interface_extends:HiveHookEventProto) - com.google.protobuf.MessageOrBuilder { + public interface HiveHookEventProtoOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional string eventType = 1; /** * optional string eventType = 1; - * @return Whether the eventType field is set. */ boolean hasEventType(); /** * optional string eventType = 1; - * @return The eventType. */ java.lang.String getEventType(); /** * optional string eventType = 1; - * @return The bytes for eventType. */ com.google.protobuf.ByteString getEventTypeBytes(); + // optional string hiveQueryId = 2; /** * optional string hiveQueryId = 2; - * @return Whether the hiveQueryId field is set. */ boolean hasHiveQueryId(); /** * optional string hiveQueryId = 2; - * @return The hiveQueryId. */ java.lang.String getHiveQueryId(); /** * optional string hiveQueryId = 2; - * @return The bytes for hiveQueryId. */ com.google.protobuf.ByteString getHiveQueryIdBytes(); + // optional int64 timestamp = 3; /** * optional int64 timestamp = 3; - * @return Whether the timestamp field is set. */ boolean hasTimestamp(); /** * optional int64 timestamp = 3; - * @return The timestamp. */ long getTimestamp(); + // optional string executionMode = 4; /** * optional string executionMode = 4; - * @return Whether the executionMode field is set. */ boolean hasExecutionMode(); /** * optional string executionMode = 4; - * @return The executionMode. */ java.lang.String getExecutionMode(); /** * optional string executionMode = 4; - * @return The bytes for executionMode. 
*/ com.google.protobuf.ByteString getExecutionModeBytes(); + // optional string requestUser = 5; /** * optional string requestUser = 5; - * @return Whether the requestUser field is set. */ boolean hasRequestUser(); /** * optional string requestUser = 5; - * @return The requestUser. */ java.lang.String getRequestUser(); /** * optional string requestUser = 5; - * @return The bytes for requestUser. */ com.google.protobuf.ByteString getRequestUserBytes(); + // optional string queue = 6; /** * optional string queue = 6; - * @return Whether the queue field is set. */ boolean hasQueue(); /** * optional string queue = 6; - * @return The queue. */ java.lang.String getQueue(); /** * optional string queue = 6; - * @return The bytes for queue. */ com.google.protobuf.ByteString getQueueBytes(); + // optional string user = 7; /** * optional string user = 7; - * @return Whether the user field is set. */ boolean hasUser(); /** * optional string user = 7; - * @return The user. */ java.lang.String getUser(); /** * optional string user = 7; - * @return The bytes for user. */ com.google.protobuf.ByteString getUserBytes(); + // optional string operationId = 8; /** * optional string operationId = 8; - * @return Whether the operationId field is set. */ boolean hasOperationId(); /** * optional string operationId = 8; - * @return The operationId. */ java.lang.String getOperationId(); /** * optional string operationId = 8; - * @return The bytes for operationId. */ com.google.protobuf.ByteString getOperationIdBytes(); + // repeated string tablesWritten = 9; /** * repeated string tablesWritten = 9; - * @return A list containing the tablesWritten. */ java.util.List - getTablesWrittenList(); + getTablesWrittenList(); /** * repeated string tablesWritten = 9; - * @return The count of tablesWritten. */ int getTablesWrittenCount(); /** * repeated string tablesWritten = 9; - * @param index The index of the element to return. - * @return The tablesWritten at the given index. */ java.lang.String getTablesWritten(int index); /** * repeated string tablesWritten = 9; - * @param index The index of the value to return. - * @return The bytes of the tablesWritten at the given index. */ com.google.protobuf.ByteString getTablesWrittenBytes(int index); + // repeated string tablesRead = 10; /** * repeated string tablesRead = 10; - * @return A list containing the tablesRead. */ java.util.List - getTablesReadList(); + getTablesReadList(); /** * repeated string tablesRead = 10; - * @return The count of tablesRead. */ int getTablesReadCount(); /** * repeated string tablesRead = 10; - * @param index The index of the element to return. - * @return The tablesRead at the given index. */ java.lang.String getTablesRead(int index); /** * repeated string tablesRead = 10; - * @param index The index of the value to return. - * @return The bytes of the tablesRead at the given index. 
*/ com.google.protobuf.ByteString getTablesReadBytes(int index); + // repeated .MapFieldEntry otherInfo = 50; /** * repeated .MapFieldEntry otherInfo = 50; */ @@ -1016,47 +822,35 @@ org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntryOrBuilder getO * Protobuf type {@code HiveHookEventProto} */ public static final class HiveHookEventProto extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:HiveHookEventProto) - HiveHookEventProtoOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements HiveHookEventProtoOrBuilder { // Use HiveHookEventProto.newBuilder() to construct. - private HiveHookEventProto(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private HiveHookEventProto(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private HiveHookEventProto() { - eventType_ = ""; - hiveQueryId_ = ""; - executionMode_ = ""; - requestUser_ = ""; - queue_ = ""; - user_ = ""; - operationId_ = ""; - tablesWritten_ = com.google.protobuf.LazyStringArrayList.EMPTY; - tablesRead_ = com.google.protobuf.LazyStringArrayList.EMPTY; - otherInfo_ = java.util.Collections.emptyList(); + private HiveHookEventProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final HiveHookEventProto defaultInstance; + public static HiveHookEventProto getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new HiveHookEventProto(); + public HiveHookEventProto getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private HiveHookEventProto( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -1068,16 +862,21 @@ private HiveHookEventProto( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - eventType_ = bs; + eventType_ = input.readBytes(); break; } case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; - hiveQueryId_ = bs; + hiveQueryId_ = input.readBytes(); break; } case 24: { @@ -1086,86 +885,69 @@ private HiveHookEventProto( break; } case 34: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000008; - executionMode_ = bs; + executionMode_ = input.readBytes(); break; } case 42: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000010; - requestUser_ = bs; + requestUser_ = input.readBytes(); break; } case 50: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000020; - queue_ = bs; + queue_ = input.readBytes(); break; } case 58: { - 
com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000040; - user_ = bs; + user_ = input.readBytes(); break; } case 66: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000080; - operationId_ = bs; + operationId_ = input.readBytes(); break; } case 74: { - com.google.protobuf.ByteString bs = input.readBytes(); - if (!((mutable_bitField0_ & 0x00000100) != 0)) { + if (!((mutable_bitField0_ & 0x00000100) == 0x00000100)) { tablesWritten_ = new com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000100; } - tablesWritten_.add(bs); + tablesWritten_.add(input.readBytes()); break; } case 82: { - com.google.protobuf.ByteString bs = input.readBytes(); - if (!((mutable_bitField0_ & 0x00000200) != 0)) { + if (!((mutable_bitField0_ & 0x00000200) == 0x00000200)) { tablesRead_ = new com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00000200; } - tablesRead_.add(bs); + tablesRead_.add(input.readBytes()); break; } case 402: { - if (!((mutable_bitField0_ & 0x00000400) != 0)) { + if (!((mutable_bitField0_ & 0x00000400) == 0x00000400)) { otherInfo_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000400; } - otherInfo_.add( - input.readMessage(org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry.PARSER, extensionRegistry)); - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } + otherInfo_.add(input.readMessage(org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry.PARSER, extensionRegistry)); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000100) != 0)) { - tablesWritten_ = tablesWritten_.getUnmodifiableView(); + if (((mutable_bitField0_ & 0x00000100) == 0x00000100)) { + tablesWritten_ = new com.google.protobuf.UnmodifiableLazyStringList(tablesWritten_); } - if (((mutable_bitField0_ & 0x00000200) != 0)) { - tablesRead_ = tablesRead_.getUnmodifiableView(); + if (((mutable_bitField0_ & 0x00000200) == 0x00000200)) { + tablesRead_ = new com.google.protobuf.UnmodifiableLazyStringList(tablesRead_); } - if (((mutable_bitField0_ & 0x00000400) != 0)) { + if (((mutable_bitField0_ & 0x00000400) == 0x00000400)) { otherInfo_ = java.util.Collections.unmodifiableList(otherInfo_); } this.unknownFields = unknownFields.build(); @@ -1177,30 +959,41 @@ private HiveHookEventProto( return org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.internal_static_HiveHookEventProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.internal_static_HiveHookEventProto_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto.class, org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public HiveHookEventProto parsePartialFrom( + 
com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new HiveHookEventProto(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional string eventType = 1; public static final int EVENTTYPE_FIELD_NUMBER = 1; - private volatile java.lang.Object eventType_; + private java.lang.Object eventType_; /** * optional string eventType = 1; - * @return Whether the eventType field is set. */ - @java.lang.Override public boolean hasEventType() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string eventType = 1; - * @return The eventType. */ - @java.lang.Override public java.lang.String getEventType() { java.lang.Object ref = eventType_; if (ref instanceof java.lang.String) { @@ -1217,9 +1010,7 @@ public java.lang.String getEventType() { } /** * optional string eventType = 1; - * @return The bytes for eventType. */ - @java.lang.Override public com.google.protobuf.ByteString getEventTypeBytes() { java.lang.Object ref = eventType_; @@ -1234,21 +1025,18 @@ public java.lang.String getEventType() { } } + // optional string hiveQueryId = 2; public static final int HIVEQUERYID_FIELD_NUMBER = 2; - private volatile java.lang.Object hiveQueryId_; + private java.lang.Object hiveQueryId_; /** * optional string hiveQueryId = 2; - * @return Whether the hiveQueryId field is set. */ - @java.lang.Override public boolean hasHiveQueryId() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string hiveQueryId = 2; - * @return The hiveQueryId. */ - @java.lang.Override public java.lang.String getHiveQueryId() { java.lang.Object ref = hiveQueryId_; if (ref instanceof java.lang.String) { @@ -1265,9 +1053,7 @@ public java.lang.String getHiveQueryId() { } /** * optional string hiveQueryId = 2; - * @return The bytes for hiveQueryId. */ - @java.lang.Override public com.google.protobuf.ByteString getHiveQueryIdBytes() { java.lang.Object ref = hiveQueryId_; @@ -1282,40 +1068,34 @@ public java.lang.String getHiveQueryId() { } } + // optional int64 timestamp = 3; public static final int TIMESTAMP_FIELD_NUMBER = 3; private long timestamp_; /** * optional int64 timestamp = 3; - * @return Whether the timestamp field is set. */ - @java.lang.Override public boolean hasTimestamp() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional int64 timestamp = 3; - * @return The timestamp. */ - @java.lang.Override public long getTimestamp() { return timestamp_; } + // optional string executionMode = 4; public static final int EXECUTIONMODE_FIELD_NUMBER = 4; - private volatile java.lang.Object executionMode_; + private java.lang.Object executionMode_; /** * optional string executionMode = 4; - * @return Whether the executionMode field is set. */ - @java.lang.Override public boolean hasExecutionMode() { - return ((bitField0_ & 0x00000008) != 0); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional string executionMode = 4; - * @return The executionMode. 
*/ - @java.lang.Override public java.lang.String getExecutionMode() { java.lang.Object ref = executionMode_; if (ref instanceof java.lang.String) { @@ -1332,9 +1112,7 @@ public java.lang.String getExecutionMode() { } /** * optional string executionMode = 4; - * @return The bytes for executionMode. */ - @java.lang.Override public com.google.protobuf.ByteString getExecutionModeBytes() { java.lang.Object ref = executionMode_; @@ -1349,21 +1127,18 @@ public java.lang.String getExecutionMode() { } } + // optional string requestUser = 5; public static final int REQUESTUSER_FIELD_NUMBER = 5; - private volatile java.lang.Object requestUser_; + private java.lang.Object requestUser_; /** * optional string requestUser = 5; - * @return Whether the requestUser field is set. */ - @java.lang.Override public boolean hasRequestUser() { - return ((bitField0_ & 0x00000010) != 0); + return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional string requestUser = 5; - * @return The requestUser. */ - @java.lang.Override public java.lang.String getRequestUser() { java.lang.Object ref = requestUser_; if (ref instanceof java.lang.String) { @@ -1380,9 +1155,7 @@ public java.lang.String getRequestUser() { } /** * optional string requestUser = 5; - * @return The bytes for requestUser. */ - @java.lang.Override public com.google.protobuf.ByteString getRequestUserBytes() { java.lang.Object ref = requestUser_; @@ -1397,21 +1170,18 @@ public java.lang.String getRequestUser() { } } + // optional string queue = 6; public static final int QUEUE_FIELD_NUMBER = 6; - private volatile java.lang.Object queue_; + private java.lang.Object queue_; /** * optional string queue = 6; - * @return Whether the queue field is set. */ - @java.lang.Override public boolean hasQueue() { - return ((bitField0_ & 0x00000020) != 0); + return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional string queue = 6; - * @return The queue. */ - @java.lang.Override public java.lang.String getQueue() { java.lang.Object ref = queue_; if (ref instanceof java.lang.String) { @@ -1428,9 +1198,7 @@ public java.lang.String getQueue() { } /** * optional string queue = 6; - * @return The bytes for queue. */ - @java.lang.Override public com.google.protobuf.ByteString getQueueBytes() { java.lang.Object ref = queue_; @@ -1445,21 +1213,18 @@ public java.lang.String getQueue() { } } + // optional string user = 7; public static final int USER_FIELD_NUMBER = 7; - private volatile java.lang.Object user_; + private java.lang.Object user_; /** * optional string user = 7; - * @return Whether the user field is set. */ - @java.lang.Override public boolean hasUser() { - return ((bitField0_ & 0x00000040) != 0); + return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional string user = 7; - * @return The user. */ - @java.lang.Override public java.lang.String getUser() { java.lang.Object ref = user_; if (ref instanceof java.lang.String) { @@ -1476,9 +1241,7 @@ public java.lang.String getUser() { } /** * optional string user = 7; - * @return The bytes for user. */ - @java.lang.Override public com.google.protobuf.ByteString getUserBytes() { java.lang.Object ref = user_; @@ -1493,21 +1256,18 @@ public java.lang.String getUser() { } } + // optional string operationId = 8; public static final int OPERATIONID_FIELD_NUMBER = 8; - private volatile java.lang.Object operationId_; + private java.lang.Object operationId_; /** * optional string operationId = 8; - * @return Whether the operationId field is set. 
*/ - @java.lang.Override public boolean hasOperationId() { - return ((bitField0_ & 0x00000080) != 0); + return ((bitField0_ & 0x00000080) == 0x00000080); } /** * optional string operationId = 8; - * @return The operationId. */ - @java.lang.Override public java.lang.String getOperationId() { java.lang.Object ref = operationId_; if (ref instanceof java.lang.String) { @@ -1524,9 +1284,7 @@ public java.lang.String getOperationId() { } /** * optional string operationId = 8; - * @return The bytes for operationId. */ - @java.lang.Override public com.google.protobuf.ByteString getOperationIdBytes() { java.lang.Object ref = operationId_; @@ -1541,89 +1299,78 @@ public java.lang.String getOperationId() { } } + // repeated string tablesWritten = 9; public static final int TABLESWRITTEN_FIELD_NUMBER = 9; private com.google.protobuf.LazyStringList tablesWritten_; /** * repeated string tablesWritten = 9; - * @return A list containing the tablesWritten. */ - public com.google.protobuf.ProtocolStringList + public java.util.List getTablesWrittenList() { return tablesWritten_; } /** * repeated string tablesWritten = 9; - * @return The count of tablesWritten. */ public int getTablesWrittenCount() { return tablesWritten_.size(); } /** * repeated string tablesWritten = 9; - * @param index The index of the element to return. - * @return The tablesWritten at the given index. */ public java.lang.String getTablesWritten(int index) { return tablesWritten_.get(index); } /** * repeated string tablesWritten = 9; - * @param index The index of the value to return. - * @return The bytes of the tablesWritten at the given index. */ public com.google.protobuf.ByteString getTablesWrittenBytes(int index) { return tablesWritten_.getByteString(index); } + // repeated string tablesRead = 10; public static final int TABLESREAD_FIELD_NUMBER = 10; private com.google.protobuf.LazyStringList tablesRead_; /** * repeated string tablesRead = 10; - * @return A list containing the tablesRead. */ - public com.google.protobuf.ProtocolStringList + public java.util.List getTablesReadList() { return tablesRead_; } /** * repeated string tablesRead = 10; - * @return The count of tablesRead. */ public int getTablesReadCount() { return tablesRead_.size(); } /** * repeated string tablesRead = 10; - * @param index The index of the element to return. - * @return The tablesRead at the given index. */ public java.lang.String getTablesRead(int index) { return tablesRead_.get(index); } /** * repeated string tablesRead = 10; - * @param index The index of the value to return. - * @return The bytes of the tablesRead at the given index. 
*/ public com.google.protobuf.ByteString getTablesReadBytes(int index) { return tablesRead_.getByteString(index); } + // repeated .MapFieldEntry otherInfo = 50; public static final int OTHERINFO_FIELD_NUMBER = 50; private java.util.List otherInfo_; /** * repeated .MapFieldEntry otherInfo = 50; */ - @java.lang.Override public java.util.List getOtherInfoList() { return otherInfo_; } /** * repeated .MapFieldEntry otherInfo = 50; */ - @java.lang.Override public java.util.List getOtherInfoOrBuilderList() { return otherInfo_; @@ -1631,111 +1378,127 @@ public java.util.Listrepeated .MapFieldEntry otherInfo = 50; */ - @java.lang.Override public int getOtherInfoCount() { return otherInfo_.size(); } /** * repeated .MapFieldEntry otherInfo = 50; */ - @java.lang.Override public org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry getOtherInfo(int index) { return otherInfo_.get(index); } /** * repeated .MapFieldEntry otherInfo = 50; */ - @java.lang.Override public org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntryOrBuilder getOtherInfoOrBuilder( int index) { return otherInfo_.get(index); } + private void initFields() { + eventType_ = ""; + hiveQueryId_ = ""; + timestamp_ = 0L; + executionMode_ = ""; + requestUser_ = ""; + queue_ = ""; + user_ = ""; + operationId_ = ""; + tablesWritten_ = com.google.protobuf.LazyStringArrayList.EMPTY; + tablesRead_ = com.google.protobuf.LazyStringArrayList.EMPTY; + otherInfo_ = java.util.Collections.emptyList(); + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, eventType_); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getEventTypeBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, hiveQueryId_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getHiveQueryIdBytes()); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeInt64(3, timestamp_); } - if (((bitField0_ & 0x00000008) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 4, executionMode_); + if (((bitField0_ & 0x00000008) == 0x00000008)) { + output.writeBytes(4, getExecutionModeBytes()); } - if (((bitField0_ & 0x00000010) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 5, requestUser_); + if (((bitField0_ & 0x00000010) == 0x00000010)) { + output.writeBytes(5, getRequestUserBytes()); } - if (((bitField0_ & 0x00000020) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 6, queue_); + if (((bitField0_ & 0x00000020) == 0x00000020)) { + output.writeBytes(6, getQueueBytes()); } - if (((bitField0_ & 0x00000040) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 7, user_); + if (((bitField0_ & 0x00000040) == 0x00000040)) { + output.writeBytes(7, getUserBytes()); } - if (((bitField0_ & 0x00000080) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 8, operationId_); + if (((bitField0_ & 0x00000080) == 0x00000080)) { + 
output.writeBytes(8, getOperationIdBytes()); } for (int i = 0; i < tablesWritten_.size(); i++) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 9, tablesWritten_.getRaw(i)); + output.writeBytes(9, tablesWritten_.getByteString(i)); } for (int i = 0; i < tablesRead_.size(); i++) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 10, tablesRead_.getRaw(i)); + output.writeBytes(10, tablesRead_.getByteString(i)); } for (int i = 0; i < otherInfo_.size(); i++) { output.writeMessage(50, otherInfo_.get(i)); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, eventType_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getEventTypeBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, hiveQueryId_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(2, getHiveQueryIdBytes()); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(3, timestamp_); } - if (((bitField0_ & 0x00000008) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(4, executionMode_); + if (((bitField0_ & 0x00000008) == 0x00000008)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(4, getExecutionModeBytes()); } - if (((bitField0_ & 0x00000010) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, requestUser_); + if (((bitField0_ & 0x00000010) == 0x00000010)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(5, getRequestUserBytes()); } - if (((bitField0_ & 0x00000020) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, queue_); + if (((bitField0_ & 0x00000020) == 0x00000020)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(6, getQueueBytes()); } - if (((bitField0_ & 0x00000040) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(7, user_); + if (((bitField0_ & 0x00000040) == 0x00000040)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(7, getUserBytes()); } - if (((bitField0_ & 0x00000080) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(8, operationId_); + if (((bitField0_ & 0x00000080) == 0x00000080)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(8, getOperationIdBytes()); } { int dataSize = 0; for (int i = 0; i < tablesWritten_.size(); i++) { - dataSize += computeStringSizeNoTag(tablesWritten_.getRaw(i)); + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(tablesWritten_.getByteString(i)); } size += dataSize; size += 1 * getTablesWrittenList().size(); @@ -1743,7 +1506,8 @@ public int getSerializedSize() { { int dataSize = 0; for (int i = 0; i < tablesRead_.size(); i++) { - dataSize += computeStringSizeNoTag(tablesRead_.getRaw(i)); + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(tablesRead_.getByteString(i)); } size += dataSize; size += 1 * getTablesReadList().size(); @@ -1752,139 
+1516,18 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(50, otherInfo_.get(i)); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto)) { - return super.equals(obj); - } - org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto other = (org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto) obj; - - if (hasEventType() != other.hasEventType()) return false; - if (hasEventType()) { - if (!getEventType() - .equals(other.getEventType())) return false; - } - if (hasHiveQueryId() != other.hasHiveQueryId()) return false; - if (hasHiveQueryId()) { - if (!getHiveQueryId() - .equals(other.getHiveQueryId())) return false; - } - if (hasTimestamp() != other.hasTimestamp()) return false; - if (hasTimestamp()) { - if (getTimestamp() - != other.getTimestamp()) return false; - } - if (hasExecutionMode() != other.hasExecutionMode()) return false; - if (hasExecutionMode()) { - if (!getExecutionMode() - .equals(other.getExecutionMode())) return false; - } - if (hasRequestUser() != other.hasRequestUser()) return false; - if (hasRequestUser()) { - if (!getRequestUser() - .equals(other.getRequestUser())) return false; - } - if (hasQueue() != other.hasQueue()) return false; - if (hasQueue()) { - if (!getQueue() - .equals(other.getQueue())) return false; - } - if (hasUser() != other.hasUser()) return false; - if (hasUser()) { - if (!getUser() - .equals(other.getUser())) return false; - } - if (hasOperationId() != other.hasOperationId()) return false; - if (hasOperationId()) { - if (!getOperationId() - .equals(other.getOperationId())) return false; - } - if (!getTablesWrittenList() - .equals(other.getTablesWrittenList())) return false; - if (!getTablesReadList() - .equals(other.getTablesReadList())) return false; - if (!getOtherInfoList() - .equals(other.getOtherInfoList())) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasEventType()) { - hash = (37 * hash) + EVENTTYPE_FIELD_NUMBER; - hash = (53 * hash) + getEventType().hashCode(); - } - if (hasHiveQueryId()) { - hash = (37 * hash) + HIVEQUERYID_FIELD_NUMBER; - hash = (53 * hash) + getHiveQueryId().hashCode(); - } - if (hasTimestamp()) { - hash = (37 * hash) + TIMESTAMP_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getTimestamp()); - } - if (hasExecutionMode()) { - hash = (37 * hash) + EXECUTIONMODE_FIELD_NUMBER; - hash = (53 * hash) + getExecutionMode().hashCode(); - } - if (hasRequestUser()) { - hash = (37 * hash) + REQUESTUSER_FIELD_NUMBER; - hash = (53 * hash) + getRequestUser().hashCode(); - } - if (hasQueue()) { - hash = (37 * hash) + QUEUE_FIELD_NUMBER; - hash = (53 * hash) + getQueue().hashCode(); - } - if (hasUser()) { - hash = (37 * hash) + USER_FIELD_NUMBER; - hash = (53 * hash) + getUser().hashCode(); - } - if (hasOperationId()) { - hash = (37 * hash) + OPERATIONID_FIELD_NUMBER; - hash = (53 * hash) + getOperationId().hashCode(); - } 
- if (getTablesWrittenCount() > 0) { - hash = (37 * hash) + TABLESWRITTEN_FIELD_NUMBER; - hash = (53 * hash) + getTablesWrittenList().hashCode(); - } - if (getTablesReadCount() > 0) { - hash = (37 * hash) + TABLESREAD_FIELD_NUMBER; - hash = (53 * hash) + getTablesReadList().hashCode(); - } - if (getOtherInfoCount() > 0) { - hash = (37 * hash) + OTHERINFO_FIELD_NUMBER; - hash = (53 * hash) + getOtherInfoList().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); } - public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -1908,59 +1551,46 @@ public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEvent } public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public 
static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -1968,16 +1598,14 @@ protected Builder newBuilderForType( * Protobuf type {@code HiveHookEventProto} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:HiveHookEventProto) - org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProtoOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProtoOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.internal_static_HiveHookEventProto_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.internal_static_HiveHookEventProto_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -1990,17 +1618,19 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getOtherInfoFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); eventType_ = ""; @@ -2032,18 +1662,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.internal_static_HiveHookEventProto_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto getDefaultInstanceForType() { return org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto build() { org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto result = buildPartial(); if (!result.isInitialized()) { @@ -2052,55 +1683,56 @@ public org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto b return result; } - @java.lang.Override public 
org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto buildPartial() { org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto result = new org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.eventType_ = eventType_; - if (((from_bitField0_ & 0x00000002) != 0)) { + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.hiveQueryId_ = hiveQueryId_; - if (((from_bitField0_ & 0x00000004) != 0)) { - result.timestamp_ = timestamp_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } - if (((from_bitField0_ & 0x00000008) != 0)) { + result.timestamp_ = timestamp_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } result.executionMode_ = executionMode_; - if (((from_bitField0_ & 0x00000010) != 0)) { + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } result.requestUser_ = requestUser_; - if (((from_bitField0_ & 0x00000020) != 0)) { + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } result.queue_ = queue_; - if (((from_bitField0_ & 0x00000040) != 0)) { + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { to_bitField0_ |= 0x00000040; } result.user_ = user_; - if (((from_bitField0_ & 0x00000080) != 0)) { + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { to_bitField0_ |= 0x00000080; } result.operationId_ = operationId_; - if (((bitField0_ & 0x00000100) != 0)) { - tablesWritten_ = tablesWritten_.getUnmodifiableView(); + if (((bitField0_ & 0x00000100) == 0x00000100)) { + tablesWritten_ = new com.google.protobuf.UnmodifiableLazyStringList( + tablesWritten_); bitField0_ = (bitField0_ & ~0x00000100); } result.tablesWritten_ = tablesWritten_; - if (((bitField0_ & 0x00000200) != 0)) { - tablesRead_ = tablesRead_.getUnmodifiableView(); + if (((bitField0_ & 0x00000200) == 0x00000200)) { + tablesRead_ = new com.google.protobuf.UnmodifiableLazyStringList( + tablesRead_); bitField0_ = (bitField0_ & ~0x00000200); } result.tablesRead_ = tablesRead_; if (otherInfoBuilder_ == null) { - if (((bitField0_ & 0x00000400) != 0)) { + if (((bitField0_ & 0x00000400) == 0x00000400)) { otherInfo_ = java.util.Collections.unmodifiableList(otherInfo_); bitField0_ = (bitField0_ & ~0x00000400); } @@ -2113,39 +1745,6 @@ public org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto b return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object 
value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto) { return mergeFrom((org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto)other); @@ -2234,24 +1833,21 @@ public Builder mergeFrom(org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.Hi otherInfo_ = other.otherInfo_; bitField0_ = (bitField0_ & ~0x00000400); otherInfoBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getOtherInfoFieldBuilder() : null; } else { otherInfoBuilder_.addAllMessages(other.otherInfo_); } } } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -2261,7 +1857,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -2271,27 +1867,23 @@ public Builder mergeFrom( } private int bitField0_; + // optional string eventType = 1; private java.lang.Object eventType_ = ""; /** * optional string eventType = 1; - * @return Whether the eventType field is set. */ public boolean hasEventType() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string eventType = 1; - * @return The eventType. */ public java.lang.String getEventType() { java.lang.Object ref = eventType_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - eventType_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + eventType_ = s; return s; } else { return (java.lang.String) ref; @@ -2299,7 +1891,6 @@ public java.lang.String getEventType() { } /** * optional string eventType = 1; - * @return The bytes for eventType. */ public com.google.protobuf.ByteString getEventTypeBytes() { @@ -2316,8 +1907,6 @@ public java.lang.String getEventType() { } /** * optional string eventType = 1; - * @param value The eventType to set. - * @return This builder for chaining. */ public Builder setEventType( java.lang.String value) { @@ -2331,7 +1920,6 @@ public Builder setEventType( } /** * optional string eventType = 1; - * @return This builder for chaining. */ public Builder clearEventType() { bitField0_ = (bitField0_ & ~0x00000001); @@ -2341,8 +1929,6 @@ public Builder clearEventType() { } /** * optional string eventType = 1; - * @param value The bytes for eventType to set. - * @return This builder for chaining. 
*/ public Builder setEventTypeBytes( com.google.protobuf.ByteString value) { @@ -2355,27 +1941,23 @@ public Builder setEventTypeBytes( return this; } + // optional string hiveQueryId = 2; private java.lang.Object hiveQueryId_ = ""; /** * optional string hiveQueryId = 2; - * @return Whether the hiveQueryId field is set. */ public boolean hasHiveQueryId() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string hiveQueryId = 2; - * @return The hiveQueryId. */ public java.lang.String getHiveQueryId() { java.lang.Object ref = hiveQueryId_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - hiveQueryId_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + hiveQueryId_ = s; return s; } else { return (java.lang.String) ref; @@ -2383,7 +1965,6 @@ public java.lang.String getHiveQueryId() { } /** * optional string hiveQueryId = 2; - * @return The bytes for hiveQueryId. */ public com.google.protobuf.ByteString getHiveQueryIdBytes() { @@ -2400,8 +1981,6 @@ public java.lang.String getHiveQueryId() { } /** * optional string hiveQueryId = 2; - * @param value The hiveQueryId to set. - * @return This builder for chaining. */ public Builder setHiveQueryId( java.lang.String value) { @@ -2415,7 +1994,6 @@ public Builder setHiveQueryId( } /** * optional string hiveQueryId = 2; - * @return This builder for chaining. */ public Builder clearHiveQueryId() { bitField0_ = (bitField0_ & ~0x00000002); @@ -2425,8 +2003,6 @@ public Builder clearHiveQueryId() { } /** * optional string hiveQueryId = 2; - * @param value The bytes for hiveQueryId to set. - * @return This builder for chaining. */ public Builder setHiveQueryIdBytes( com.google.protobuf.ByteString value) { @@ -2439,27 +2015,22 @@ public Builder setHiveQueryIdBytes( return this; } + // optional int64 timestamp = 3; private long timestamp_ ; /** * optional int64 timestamp = 3; - * @return Whether the timestamp field is set. */ - @java.lang.Override public boolean hasTimestamp() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional int64 timestamp = 3; - * @return The timestamp. */ - @java.lang.Override public long getTimestamp() { return timestamp_; } /** * optional int64 timestamp = 3; - * @param value The timestamp to set. - * @return This builder for chaining. */ public Builder setTimestamp(long value) { bitField0_ |= 0x00000004; @@ -2469,7 +2040,6 @@ public Builder setTimestamp(long value) { } /** * optional int64 timestamp = 3; - * @return This builder for chaining. */ public Builder clearTimestamp() { bitField0_ = (bitField0_ & ~0x00000004); @@ -2478,27 +2048,23 @@ public Builder clearTimestamp() { return this; } + // optional string executionMode = 4; private java.lang.Object executionMode_ = ""; /** * optional string executionMode = 4; - * @return Whether the executionMode field is set. */ public boolean hasExecutionMode() { - return ((bitField0_ & 0x00000008) != 0); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional string executionMode = 4; - * @return The executionMode. 
*/ public java.lang.String getExecutionMode() { java.lang.Object ref = executionMode_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - executionMode_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + executionMode_ = s; return s; } else { return (java.lang.String) ref; @@ -2506,7 +2072,6 @@ public java.lang.String getExecutionMode() { } /** * optional string executionMode = 4; - * @return The bytes for executionMode. */ public com.google.protobuf.ByteString getExecutionModeBytes() { @@ -2523,8 +2088,6 @@ public java.lang.String getExecutionMode() { } /** * optional string executionMode = 4; - * @param value The executionMode to set. - * @return This builder for chaining. */ public Builder setExecutionMode( java.lang.String value) { @@ -2538,7 +2101,6 @@ public Builder setExecutionMode( } /** * optional string executionMode = 4; - * @return This builder for chaining. */ public Builder clearExecutionMode() { bitField0_ = (bitField0_ & ~0x00000008); @@ -2548,8 +2110,6 @@ public Builder clearExecutionMode() { } /** * optional string executionMode = 4; - * @param value The bytes for executionMode to set. - * @return This builder for chaining. */ public Builder setExecutionModeBytes( com.google.protobuf.ByteString value) { @@ -2562,27 +2122,23 @@ public Builder setExecutionModeBytes( return this; } + // optional string requestUser = 5; private java.lang.Object requestUser_ = ""; /** * optional string requestUser = 5; - * @return Whether the requestUser field is set. */ public boolean hasRequestUser() { - return ((bitField0_ & 0x00000010) != 0); + return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional string requestUser = 5; - * @return The requestUser. */ public java.lang.String getRequestUser() { java.lang.Object ref = requestUser_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - requestUser_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + requestUser_ = s; return s; } else { return (java.lang.String) ref; @@ -2590,7 +2146,6 @@ public java.lang.String getRequestUser() { } /** * optional string requestUser = 5; - * @return The bytes for requestUser. */ public com.google.protobuf.ByteString getRequestUserBytes() { @@ -2607,8 +2162,6 @@ public java.lang.String getRequestUser() { } /** * optional string requestUser = 5; - * @param value The requestUser to set. - * @return This builder for chaining. */ public Builder setRequestUser( java.lang.String value) { @@ -2622,7 +2175,6 @@ public Builder setRequestUser( } /** * optional string requestUser = 5; - * @return This builder for chaining. */ public Builder clearRequestUser() { bitField0_ = (bitField0_ & ~0x00000010); @@ -2632,8 +2184,6 @@ public Builder clearRequestUser() { } /** * optional string requestUser = 5; - * @param value The bytes for requestUser to set. - * @return This builder for chaining. */ public Builder setRequestUserBytes( com.google.protobuf.ByteString value) { @@ -2646,27 +2196,23 @@ public Builder setRequestUserBytes( return this; } + // optional string queue = 6; private java.lang.Object queue_ = ""; /** * optional string queue = 6; - * @return Whether the queue field is set. 
*/ public boolean hasQueue() { - return ((bitField0_ & 0x00000020) != 0); + return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional string queue = 6; - * @return The queue. */ public java.lang.String getQueue() { java.lang.Object ref = queue_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - queue_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + queue_ = s; return s; } else { return (java.lang.String) ref; @@ -2674,7 +2220,6 @@ public java.lang.String getQueue() { } /** * optional string queue = 6; - * @return The bytes for queue. */ public com.google.protobuf.ByteString getQueueBytes() { @@ -2691,8 +2236,6 @@ public java.lang.String getQueue() { } /** * optional string queue = 6; - * @param value The queue to set. - * @return This builder for chaining. */ public Builder setQueue( java.lang.String value) { @@ -2706,7 +2249,6 @@ public Builder setQueue( } /** * optional string queue = 6; - * @return This builder for chaining. */ public Builder clearQueue() { bitField0_ = (bitField0_ & ~0x00000020); @@ -2716,8 +2258,6 @@ public Builder clearQueue() { } /** * optional string queue = 6; - * @param value The bytes for queue to set. - * @return This builder for chaining. */ public Builder setQueueBytes( com.google.protobuf.ByteString value) { @@ -2730,27 +2270,23 @@ public Builder setQueueBytes( return this; } + // optional string user = 7; private java.lang.Object user_ = ""; /** * optional string user = 7; - * @return Whether the user field is set. */ public boolean hasUser() { - return ((bitField0_ & 0x00000040) != 0); + return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional string user = 7; - * @return The user. */ public java.lang.String getUser() { java.lang.Object ref = user_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - user_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + user_ = s; return s; } else { return (java.lang.String) ref; @@ -2758,7 +2294,6 @@ public java.lang.String getUser() { } /** * optional string user = 7; - * @return The bytes for user. */ public com.google.protobuf.ByteString getUserBytes() { @@ -2775,8 +2310,6 @@ public java.lang.String getUser() { } /** * optional string user = 7; - * @param value The user to set. - * @return This builder for chaining. */ public Builder setUser( java.lang.String value) { @@ -2790,7 +2323,6 @@ public Builder setUser( } /** * optional string user = 7; - * @return This builder for chaining. */ public Builder clearUser() { bitField0_ = (bitField0_ & ~0x00000040); @@ -2800,8 +2332,6 @@ public Builder clearUser() { } /** * optional string user = 7; - * @param value The bytes for user to set. - * @return This builder for chaining. */ public Builder setUserBytes( com.google.protobuf.ByteString value) { @@ -2814,27 +2344,23 @@ public Builder setUserBytes( return this; } + // optional string operationId = 8; private java.lang.Object operationId_ = ""; /** * optional string operationId = 8; - * @return Whether the operationId field is set. */ public boolean hasOperationId() { - return ((bitField0_ & 0x00000080) != 0); + return ((bitField0_ & 0x00000080) == 0x00000080); } /** * optional string operationId = 8; - * @return The operationId. 
*/ public java.lang.String getOperationId() { java.lang.Object ref = operationId_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - operationId_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + operationId_ = s; return s; } else { return (java.lang.String) ref; @@ -2842,7 +2368,6 @@ public java.lang.String getOperationId() { } /** * optional string operationId = 8; - * @return The bytes for operationId. */ public com.google.protobuf.ByteString getOperationIdBytes() { @@ -2859,8 +2384,6 @@ public java.lang.String getOperationId() { } /** * optional string operationId = 8; - * @param value The operationId to set. - * @return This builder for chaining. */ public Builder setOperationId( java.lang.String value) { @@ -2874,7 +2397,6 @@ public Builder setOperationId( } /** * optional string operationId = 8; - * @return This builder for chaining. */ public Builder clearOperationId() { bitField0_ = (bitField0_ & ~0x00000080); @@ -2884,8 +2406,6 @@ public Builder clearOperationId() { } /** * optional string operationId = 8; - * @param value The bytes for operationId to set. - * @return This builder for chaining. */ public Builder setOperationIdBytes( com.google.protobuf.ByteString value) { @@ -2898,40 +2418,35 @@ public Builder setOperationIdBytes( return this; } + // repeated string tablesWritten = 9; private com.google.protobuf.LazyStringList tablesWritten_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureTablesWrittenIsMutable() { - if (!((bitField0_ & 0x00000100) != 0)) { + if (!((bitField0_ & 0x00000100) == 0x00000100)) { tablesWritten_ = new com.google.protobuf.LazyStringArrayList(tablesWritten_); bitField0_ |= 0x00000100; } } /** * repeated string tablesWritten = 9; - * @return A list containing the tablesWritten. */ - public com.google.protobuf.ProtocolStringList + public java.util.List getTablesWrittenList() { - return tablesWritten_.getUnmodifiableView(); + return java.util.Collections.unmodifiableList(tablesWritten_); } /** * repeated string tablesWritten = 9; - * @return The count of tablesWritten. */ public int getTablesWrittenCount() { return tablesWritten_.size(); } /** * repeated string tablesWritten = 9; - * @param index The index of the element to return. - * @return The tablesWritten at the given index. */ public java.lang.String getTablesWritten(int index) { return tablesWritten_.get(index); } /** * repeated string tablesWritten = 9; - * @param index The index of the value to return. - * @return The bytes of the tablesWritten at the given index. */ public com.google.protobuf.ByteString getTablesWrittenBytes(int index) { @@ -2939,9 +2454,6 @@ public java.lang.String getTablesWritten(int index) { } /** * repeated string tablesWritten = 9; - * @param index The index to set the value at. - * @param value The tablesWritten to set. - * @return This builder for chaining. */ public Builder setTablesWritten( int index, java.lang.String value) { @@ -2955,8 +2467,6 @@ public Builder setTablesWritten( } /** * repeated string tablesWritten = 9; - * @param value The tablesWritten to add. - * @return This builder for chaining. */ public Builder addTablesWritten( java.lang.String value) { @@ -2970,20 +2480,16 @@ public Builder addTablesWritten( } /** * repeated string tablesWritten = 9; - * @param values The tablesWritten to add. - * @return This builder for chaining. 
*/ public Builder addAllTablesWritten( java.lang.Iterable values) { ensureTablesWrittenIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, tablesWritten_); + super.addAll(values, tablesWritten_); onChanged(); return this; } /** * repeated string tablesWritten = 9; - * @return This builder for chaining. */ public Builder clearTablesWritten() { tablesWritten_ = com.google.protobuf.LazyStringArrayList.EMPTY; @@ -2993,8 +2499,6 @@ public Builder clearTablesWritten() { } /** * repeated string tablesWritten = 9; - * @param value The bytes of the tablesWritten to add. - * @return This builder for chaining. */ public Builder addTablesWrittenBytes( com.google.protobuf.ByteString value) { @@ -3007,40 +2511,35 @@ public Builder addTablesWrittenBytes( return this; } + // repeated string tablesRead = 10; private com.google.protobuf.LazyStringList tablesRead_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureTablesReadIsMutable() { - if (!((bitField0_ & 0x00000200) != 0)) { + if (!((bitField0_ & 0x00000200) == 0x00000200)) { tablesRead_ = new com.google.protobuf.LazyStringArrayList(tablesRead_); bitField0_ |= 0x00000200; } } /** * repeated string tablesRead = 10; - * @return A list containing the tablesRead. */ - public com.google.protobuf.ProtocolStringList + public java.util.List getTablesReadList() { - return tablesRead_.getUnmodifiableView(); + return java.util.Collections.unmodifiableList(tablesRead_); } /** * repeated string tablesRead = 10; - * @return The count of tablesRead. */ public int getTablesReadCount() { return tablesRead_.size(); } /** * repeated string tablesRead = 10; - * @param index The index of the element to return. - * @return The tablesRead at the given index. */ public java.lang.String getTablesRead(int index) { return tablesRead_.get(index); } /** * repeated string tablesRead = 10; - * @param index The index of the value to return. - * @return The bytes of the tablesRead at the given index. */ public com.google.protobuf.ByteString getTablesReadBytes(int index) { @@ -3048,9 +2547,6 @@ public java.lang.String getTablesRead(int index) { } /** * repeated string tablesRead = 10; - * @param index The index to set the value at. - * @param value The tablesRead to set. - * @return This builder for chaining. */ public Builder setTablesRead( int index, java.lang.String value) { @@ -3064,8 +2560,6 @@ public Builder setTablesRead( } /** * repeated string tablesRead = 10; - * @param value The tablesRead to add. - * @return This builder for chaining. */ public Builder addTablesRead( java.lang.String value) { @@ -3079,20 +2573,16 @@ public Builder addTablesRead( } /** * repeated string tablesRead = 10; - * @param values The tablesRead to add. - * @return This builder for chaining. */ public Builder addAllTablesRead( java.lang.Iterable values) { ensureTablesReadIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, tablesRead_); + super.addAll(values, tablesRead_); onChanged(); return this; } /** * repeated string tablesRead = 10; - * @return This builder for chaining. */ public Builder clearTablesRead() { tablesRead_ = com.google.protobuf.LazyStringArrayList.EMPTY; @@ -3102,8 +2592,6 @@ public Builder clearTablesRead() { } /** * repeated string tablesRead = 10; - * @param value The bytes of the tablesRead to add. - * @return This builder for chaining. 
*/ public Builder addTablesReadBytes( com.google.protobuf.ByteString value) { @@ -3116,16 +2604,17 @@ public Builder addTablesReadBytes( return this; } + // repeated .MapFieldEntry otherInfo = 50; private java.util.List otherInfo_ = java.util.Collections.emptyList(); private void ensureOtherInfoIsMutable() { - if (!((bitField0_ & 0x00000400) != 0)) { + if (!((bitField0_ & 0x00000400) == 0x00000400)) { otherInfo_ = new java.util.ArrayList(otherInfo_); bitField0_ |= 0x00000400; } } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry, org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry.Builder, org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntryOrBuilder> otherInfoBuilder_; /** @@ -3257,8 +2746,7 @@ public Builder addAllOtherInfo( java.lang.Iterable values) { if (otherInfoBuilder_ == null) { ensureOtherInfoIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, otherInfo_); + super.addAll(values, otherInfo_); onChanged(); } else { otherInfoBuilder_.addAllMessages(values); @@ -3341,89 +2829,48 @@ public org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry.Builde getOtherInfoBuilderList() { return getOtherInfoFieldBuilder().getBuilderList(); } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry, org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry.Builder, org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntryOrBuilder> getOtherInfoFieldBuilder() { if (otherInfoBuilder_ == null) { - otherInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + otherInfoBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry, org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntry.Builder, org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.MapFieldEntryOrBuilder>( otherInfo_, - ((bitField0_ & 0x00000400) != 0), + ((bitField0_ & 0x00000400) == 0x00000400), getParentForChildren(), isClean()); otherInfo_ = null; } return otherInfoBuilder_; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:HiveHookEventProto) } - // @@protoc_insertion_point(class_scope:HiveHookEventProto) - private static final org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto(); - } - - public static org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public HiveHookEventProto parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new HiveHookEventProto(input, extensionRegistry); 
- } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new HiveHookEventProto(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:HiveHookEventProto) } - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_MapFieldEntry_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_MapFieldEntry_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_HiveHookEventProto_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_HiveHookEventProto_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; } - private static com.google.protobuf.Descriptors.FileDescriptor + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; static { java.lang.String[] descriptorData = { @@ -3438,22 +2885,30 @@ public org.apache.hadoop.hive.ql.hooks.proto.HiveHookEvents.HiveHookEventProto g "%org.apache.hadoop.hive.ql.hooks.protoB\016" + "HiveHookEvents" }; - descriptor = com.google.protobuf.Descriptors.FileDescriptor + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_MapFieldEntry_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_MapFieldEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MapFieldEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_HiveHookEventProto_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_HiveHookEventProto_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_HiveHookEventProto_descriptor, + new java.lang.String[] { "EventType", "HiveQueryId", "Timestamp", "ExecutionMode", "RequestUser", "Queue", "User", "OperationId", "TablesWritten", "TablesRead", "OtherInfo", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { - }); - internal_static_MapFieldEntry_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_MapFieldEntry_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_MapFieldEntry_descriptor, - new java.lang.String[] { "Key", "Value", }); - internal_static_HiveHookEventProto_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_HiveHookEventProto_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - 
internal_static_HiveHookEventProto_descriptor,
-        new java.lang.String[] { "EventType", "HiveQueryId", "Timestamp", "ExecutionMode", "RequestUser", "Queue", "User", "OperationId", "TablesWritten", "TablesRead", "OtherInfo", });
+        }, assigner);
   }
 
   // @@protoc_insertion_point(outer_class_scope)
diff --git a/ql/src/gen/protobuf/gen-test/org/apache/hadoop/hive/ql/io/protobuf/SampleProtos.java b/ql/src/gen/protobuf/gen-test/org/apache/hadoop/hive/ql/io/protobuf/SampleProtos.java
index 23202ea491c5..ac75608439d0 100644
--- a/ql/src/gen/protobuf/gen-test/org/apache/hadoop/hive/ql/io/protobuf/SampleProtos.java
+++ b/ql/src/gen/protobuf/gen-test/org/apache/hadoop/hive/ql/io/protobuf/SampleProtos.java
@@ -5,49 +5,38 @@ public final class SampleProtos {
   private SampleProtos() {}
-  public static void registerAllExtensions(
-      com.google.protobuf.ExtensionRegistryLite registry) {
-  }
-
   public static void registerAllExtensions(
       com.google.protobuf.ExtensionRegistry registry) {
-    registerAllExtensions(
-        (com.google.protobuf.ExtensionRegistryLite) registry);
   }
-  public interface MapFieldEntryOrBuilder extends
-      // @@protoc_insertion_point(interface_extends:MapFieldEntry)
-      com.google.protobuf.MessageOrBuilder {
+  public interface MapFieldEntryOrBuilder
+      extends com.google.protobuf.MessageOrBuilder {
 
+    // optional string key = 1;
     /**
      * optional string key = 1;
-     * @return Whether the key field is set.
      */
     boolean hasKey();
     /**
      * optional string key = 1;
-     * @return The key.
     */
     java.lang.String getKey();
     /**
      * optional string key = 1;
-     * @return The bytes for key.
     */
     com.google.protobuf.ByteString
         getKeyBytes();
 
+    // optional string value = 2;
     /**
      * optional string value = 2;
-     * @return Whether the value field is set.
     */
     boolean hasValue();
     /**
      * optional string value = 2;
-     * @return The value.
     */
     java.lang.String getValue();
     /**
      * optional string value = 2;
-     * @return The bytes for value.
     */
     com.google.protobuf.ByteString
         getValueBytes();
@@ -56,39 +45,35 @@ public interface MapFieldEntryOrBuilder extends
    * Protobuf type {@code MapFieldEntry}
    */
   public static final class MapFieldEntry extends
-      com.google.protobuf.GeneratedMessageV3 implements
-      // @@protoc_insertion_point(message_implements:MapFieldEntry)
-      MapFieldEntryOrBuilder {
-  private static final long serialVersionUID = 0L;
+      com.google.protobuf.GeneratedMessage
+      implements MapFieldEntryOrBuilder {
     // Use MapFieldEntry.newBuilder() to construct.
- private MapFieldEntry(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private MapFieldEntry(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private MapFieldEntry() { - key_ = ""; - value_ = ""; + private MapFieldEntry(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final MapFieldEntry defaultInstance; + public static MapFieldEntry getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new MapFieldEntry(); + public MapFieldEntry getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private MapFieldEntry( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -100,34 +85,30 @@ private MapFieldEntry( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000001; - key_ = bs; + key_ = input.readBytes(); break; } case 18: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00000002; - value_ = bs; - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } + value_ = input.readBytes(); break; } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -138,30 +119,41 @@ private MapFieldEntry( return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.internal_static_MapFieldEntry_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.internal_static_MapFieldEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.class, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public MapFieldEntry parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new MapFieldEntry(input, extensionRegistry); + } + }; 
+ + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // optional string key = 1; public static final int KEY_FIELD_NUMBER = 1; - private volatile java.lang.Object key_; + private java.lang.Object key_; /** * optional string key = 1; - * @return Whether the key field is set. */ - @java.lang.Override public boolean hasKey() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string key = 1; - * @return The key. */ - @java.lang.Override public java.lang.String getKey() { java.lang.Object ref = key_; if (ref instanceof java.lang.String) { @@ -178,9 +170,7 @@ public java.lang.String getKey() { } /** * optional string key = 1; - * @return The bytes for key. */ - @java.lang.Override public com.google.protobuf.ByteString getKeyBytes() { java.lang.Object ref = key_; @@ -195,21 +185,18 @@ public java.lang.String getKey() { } } + // optional string value = 2; public static final int VALUE_FIELD_NUMBER = 2; - private volatile java.lang.Object value_; + private java.lang.Object value_; /** * optional string value = 2; - * @return Whether the value field is set. */ - @java.lang.Override public boolean hasValue() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string value = 2; - * @return The value. */ - @java.lang.Override public java.lang.String getValue() { java.lang.Object ref = value_; if (ref instanceof java.lang.String) { @@ -226,9 +213,7 @@ public java.lang.String getValue() { } /** * optional string value = 2; - * @return The bytes for value. */ - @java.lang.Override public com.google.protobuf.ByteString getValueBytes() { java.lang.Object ref = value_; @@ -243,101 +228,57 @@ public java.lang.String getValue() { } } + private void initFields() { + key_ = ""; + value_ = ""; + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, key_); + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeBytes(1, getKeyBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, value_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + output.writeBytes(2, getValueBytes()); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, key_); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(1, getKeyBytes()); } - if (((bitField0_ & 0x00000002) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, value_); + if (((bitField0_ & 0x00000002) == 0x00000002)) { + size += com.google.protobuf.CodedOutputStream + 
.computeBytesSize(2, getValueBytes()); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry)) { - return super.equals(obj); - } - org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry other = (org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry) obj; - - if (hasKey() != other.hasKey()) return false; - if (hasKey()) { - if (!getKey() - .equals(other.getKey())) return false; - } - if (hasValue() != other.hasValue()) return false; - if (hasValue()) { - if (!getValue() - .equals(other.getValue())) return false; - } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasKey()) { - hash = (37 * hash) + KEY_FIELD_NUMBER; - hash = (53 * hash) + getKey().hashCode(); - } - if (hasValue()) { - hash = (37 * hash) + VALUE_FIELD_NUMBER; - hash = (53 * hash) + getValue().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); } - public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -361,59 +302,46 @@ public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry p } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws 
java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -421,16 +349,14 @@ protected Builder newBuilderForType( * Protobuf type {@code MapFieldEntry} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:MapFieldEntry) - org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.internal_static_MapFieldEntry_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.internal_static_MapFieldEntry_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -443,16 +369,18 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); key_ = ""; @@ -462,18 +390,19 @@ public Builder clear() { return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor 
getDescriptorForType() { return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.internal_static_MapFieldEntry_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry getDefaultInstanceForType() { return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry build() { org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry result = buildPartial(); if (!result.isInitialized()) { @@ -482,16 +411,15 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry build() return result; } - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry buildPartial() { org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry result = new org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } result.key_ = key_; - if (((from_bitField0_ & 0x00000002) != 0)) { + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } result.value_ = value_; @@ -500,39 +428,6 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry buildPar return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry) { return mergeFrom((org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry)other); @@ -554,17 +449,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapF value_ = other.value_; onChanged(); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -574,7 +466,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); 
@@ -584,27 +476,23 @@ public Builder mergeFrom( } private int bitField0_; + // optional string key = 1; private java.lang.Object key_ = ""; /** * optional string key = 1; - * @return Whether the key field is set. */ public boolean hasKey() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional string key = 1; - * @return The key. */ public java.lang.String getKey() { java.lang.Object ref = key_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - key_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + key_ = s; return s; } else { return (java.lang.String) ref; @@ -612,7 +500,6 @@ public java.lang.String getKey() { } /** * optional string key = 1; - * @return The bytes for key. */ public com.google.protobuf.ByteString getKeyBytes() { @@ -629,8 +516,6 @@ public java.lang.String getKey() { } /** * optional string key = 1; - * @param value The key to set. - * @return This builder for chaining. */ public Builder setKey( java.lang.String value) { @@ -644,7 +529,6 @@ public Builder setKey( } /** * optional string key = 1; - * @return This builder for chaining. */ public Builder clearKey() { bitField0_ = (bitField0_ & ~0x00000001); @@ -654,8 +538,6 @@ public Builder clearKey() { } /** * optional string key = 1; - * @param value The bytes for key to set. - * @return This builder for chaining. */ public Builder setKeyBytes( com.google.protobuf.ByteString value) { @@ -668,27 +550,23 @@ public Builder setKeyBytes( return this; } + // optional string value = 2; private java.lang.Object value_ = ""; /** * optional string value = 2; - * @return Whether the value field is set. */ public boolean hasValue() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional string value = 2; - * @return The value. */ public java.lang.String getValue() { java.lang.Object ref = value_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - value_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + value_ = s; return s; } else { return (java.lang.String) ref; @@ -696,7 +574,6 @@ public java.lang.String getValue() { } /** * optional string value = 2; - * @return The bytes for value. */ public com.google.protobuf.ByteString getValueBytes() { @@ -713,8 +590,6 @@ public java.lang.String getValue() { } /** * optional string value = 2; - * @param value The value to set. - * @return This builder for chaining. */ public Builder setValue( java.lang.String value) { @@ -728,7 +603,6 @@ public Builder setValue( } /** * optional string value = 2; - * @return This builder for chaining. */ public Builder clearValue() { bitField0_ = (bitField0_ & ~0x00000002); @@ -738,8 +612,6 @@ public Builder clearValue() { } /** * optional string value = 2; - * @param value The bytes for value to set. - * @return This builder for chaining. 
*/ public Builder setValueBytes( com.google.protobuf.ByteString value) { @@ -751,63 +623,22 @@ public Builder setValueBytes( onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:MapFieldEntry) } - // @@protoc_insertion_point(class_scope:MapFieldEntry) - private static final org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry(); - } - - public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public MapFieldEntry parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new MapFieldEntry(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new MapFieldEntry(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:MapFieldEntry) } - public interface Mesg1OrBuilder extends - // @@protoc_insertion_point(interface_extends:Mesg1) - com.google.protobuf.MessageOrBuilder { + public interface Mesg1OrBuilder + extends com.google.protobuf.MessageOrBuilder { + // repeated .MapFieldEntry anotherMap = 1; /** * repeated .MapFieldEntry anotherMap = 1; */ @@ -832,14 +663,13 @@ public interface Mesg1OrBuilder extends org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder getAnotherMapOrBuilder( int index); + // optional .MapFieldEntry noMap = 2; /** * optional .MapFieldEntry noMap = 2; - * @return Whether the noMap field is set. */ boolean hasNoMap(); /** * optional .MapFieldEntry noMap = 2; - * @return The noMap. */ org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry getNoMap(); /** @@ -847,20 +677,17 @@ org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder getAno */ org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder getNoMapOrBuilder(); + // repeated int32 intList = 3; /** * repeated int32 intList = 3; - * @return A list containing the intList. */ java.util.List getIntListList(); /** * repeated int32 intList = 3; - * @return The count of intList. */ int getIntListCount(); /** * repeated int32 intList = 3; - * @param index The index of the element to return. - * @return The intList at the given index. 
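// Usage sketch, not part of the patch: regardless of which generated variant is
// on the classpath, MapFieldEntry keeps the same builder/parse surface shown
// above. parseFrom(byte[]) and toByteArray() are assumed from the standard
// generated-message API.
import org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry;

public class MapFieldEntryRoundTrip {
  public static void main(String[] args) throws Exception {
    MapFieldEntry entry = MapFieldEntry.newBuilder()
        .setKey("color")          // setKey/setValue appear in the hunks above
        .setValue("blue")
        .build();
    byte[] wire = entry.toByteArray();                    // serialize
    MapFieldEntry parsed = MapFieldEntry.parseFrom(wire); // parse back
    System.out.println(parsed.getKey() + "=" + parsed.getValue()); // color=blue
  }
}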
*/ int getIntList(int index); } @@ -868,39 +695,35 @@ org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder getAno * Protobuf type {@code Mesg1} */ public static final class Mesg1 extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:Mesg1) - Mesg1OrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements Mesg1OrBuilder { // Use Mesg1.newBuilder() to construct. - private Mesg1(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private Mesg1(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private Mesg1() { - anotherMap_ = java.util.Collections.emptyList(); - intList_ = emptyIntList(); + private Mesg1(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final Mesg1 defaultInstance; + public static Mesg1 getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new Mesg1(); + public Mesg1 getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private Mesg1( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -912,18 +735,24 @@ private Mesg1( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 10: { - if (!((mutable_bitField0_ & 0x00000001) != 0)) { + if (!((mutable_bitField0_ & 0x00000001) == 0x00000001)) { anotherMap_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000001; } - anotherMap_.add( - input.readMessage(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.PARSER, extensionRegistry)); + anotherMap_.add(input.readMessage(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.PARSER, extensionRegistry)); break; } case 18: { org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.Builder subBuilder = null; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { subBuilder = noMap_.toBuilder(); } noMap_ = input.readMessage(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.PARSER, extensionRegistry); @@ -935,48 +764,39 @@ private Mesg1( break; } case 24: { - if (!((mutable_bitField0_ & 0x00000004) != 0)) { - intList_ = newIntList(); + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + intList_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000004; } - intList_.addInt(input.readInt32()); + intList_.add(input.readInt32()); break; } case 26: { int length = input.readRawVarint32(); int limit = input.pushLimit(length); - if (!((mutable_bitField0_ & 0x00000004) != 0) && input.getBytesUntilLimit() > 0) { - intList_ = newIntList(); + if (!((mutable_bitField0_ & 0x00000004) == 0x00000004) && 
input.getBytesUntilLimit() > 0) { + intList_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00000004; } while (input.getBytesUntilLimit() > 0) { - intList_.addInt(input.readInt32()); + intList_.add(input.readInt32()); } input.popLimit(limit); break; } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } } } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00000001) != 0)) { + if (((mutable_bitField0_ & 0x00000001) == 0x00000001)) { anotherMap_ = java.util.Collections.unmodifiableList(anotherMap_); } - if (((mutable_bitField0_ & 0x00000004) != 0)) { - intList_.makeImmutable(); // C + if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) { + intList_ = java.util.Collections.unmodifiableList(intList_); } this.unknownFields = unknownFields.build(); makeExtensionsImmutable(); @@ -987,28 +807,41 @@ private Mesg1( return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.internal_static_Mesg1_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.internal_static_Mesg1_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.class, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public Mesg1 parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new Mesg1(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + private int bitField0_; + // repeated .MapFieldEntry anotherMap = 1; public static final int ANOTHERMAP_FIELD_NUMBER = 1; private java.util.List anotherMap_; /** * repeated .MapFieldEntry anotherMap = 1; */ - @java.lang.Override public java.util.List getAnotherMapList() { return anotherMap_; } /** * repeated .MapFieldEntry anotherMap = 1; */ - @java.lang.Override public java.util.List getAnotherMapOrBuilderList() { return anotherMap_; @@ -1016,108 +849,100 @@ public java.util.Listrepeated .MapFieldEntry anotherMap = 1; */ - @java.lang.Override public int getAnotherMapCount() { return anotherMap_.size(); } /** * repeated .MapFieldEntry anotherMap = 1; */ - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry getAnotherMap(int index) { return anotherMap_.get(index); } /** * repeated .MapFieldEntry anotherMap = 1; */ - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder getAnotherMapOrBuilder( int index) { return anotherMap_.get(index); } + // optional .MapFieldEntry noMap = 2; public static final int NOMAP_FIELD_NUMBER = 2; private org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry noMap_; /** * optional 
.MapFieldEntry noMap = 2; - * @return Whether the noMap field is set. */ - @java.lang.Override public boolean hasNoMap() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional .MapFieldEntry noMap = 2; - * @return The noMap. */ - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry getNoMap() { - return noMap_ == null ? org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.getDefaultInstance() : noMap_; + return noMap_; } /** * optional .MapFieldEntry noMap = 2; */ - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder getNoMapOrBuilder() { - return noMap_ == null ? org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.getDefaultInstance() : noMap_; + return noMap_; } + // repeated int32 intList = 3; public static final int INTLIST_FIELD_NUMBER = 3; - private com.google.protobuf.Internal.IntList intList_; + private java.util.List intList_; /** * repeated int32 intList = 3; - * @return A list containing the intList. */ - @java.lang.Override public java.util.List getIntListList() { return intList_; } /** * repeated int32 intList = 3; - * @return The count of intList. */ public int getIntListCount() { return intList_.size(); } /** * repeated int32 intList = 3; - * @param index The index of the element to return. - * @return The intList at the given index. */ public int getIntList(int index) { - return intList_.getInt(index); + return intList_.get(index); } + private void initFields() { + anotherMap_ = java.util.Collections.emptyList(); + noMap_ = org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.getDefaultInstance(); + intList_ = java.util.Collections.emptyList(); + } private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getSerializedSize(); for (int i = 0; i < anotherMap_.size(); i++) { output.writeMessage(1, anotherMap_.get(i)); } - if (((bitField0_ & 0x00000001) != 0)) { - output.writeMessage(2, getNoMap()); + if (((bitField0_ & 0x00000001) == 0x00000001)) { + output.writeMessage(2, noMap_); } for (int i = 0; i < intList_.size(); i++) { - output.writeInt32(3, intList_.getInt(i)); + output.writeInt32(3, intList_.get(i)); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; @@ -1125,82 +950,31 @@ public int getSerializedSize() { size += com.google.protobuf.CodedOutputStream .computeMessageSize(1, anotherMap_.get(i)); } - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, getNoMap()); + .computeMessageSize(2, noMap_); } { int dataSize = 0; for (int i = 0; i < intList_.size(); i++) { dataSize += com.google.protobuf.CodedOutputStream - .computeInt32SizeNoTag(intList_.getInt(i)); + .computeInt32SizeNoTag(intList_.get(i)); } size += dataSize; size += 1 * getIntListList().size(); } - size += 
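// Illustrative sketch, not part of the patch: the "-" side stores an unset noMap_
// as null and substitutes the default instance in the getter, while the "+" side
// pre-assigns the default instance in initFields(); either way callers see the
// default instance for an unset field and should test presence with hasNoMap().
import org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry;
import org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1;

public class NoMapPresenceSketch {
  public static void main(String[] args) {
    Mesg1 empty = Mesg1.getDefaultInstance();
    System.out.println(empty.hasNoMap());             // false
    System.out.println(empty.getNoMap() != null);     // true, never null
    System.out.println(
        empty.getNoMap().equals(MapFieldEntry.getDefaultInstance())); // true
  }
}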
unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1)) { - return super.equals(obj); - } - org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 other = (org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1) obj; - - if (!getAnotherMapList() - .equals(other.getAnotherMapList())) return false; - if (hasNoMap() != other.hasNoMap()) return false; - if (hasNoMap()) { - if (!getNoMap() - .equals(other.getNoMap())) return false; - } - if (!getIntListList() - .equals(other.getIntListList())) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (getAnotherMapCount() > 0) { - hash = (37 * hash) + ANOTHERMAP_FIELD_NUMBER; - hash = (53 * hash) + getAnotherMapList().hashCode(); - } - if (hasNoMap()) { - hash = (37 * hash) + NOMAP_FIELD_NUMBER; - hash = (53 * hash) + getNoMap().hashCode(); - } - if (getIntListCount() > 0) { - hash = (37 * hash) + INTLIST_FIELD_NUMBER; - hash = (53 * hash) + getIntListList().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); } - public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -1224,59 +998,46 @@ public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 parseFrom } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 parseDelimitedFrom( java.io.InputStream input, 
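// Usage sketch, not part of the patch: the public static parse helpers keep the
// same signatures on both sides of the hunk; only the internal delegation
// (GeneratedMessageV3.parseWithIOException vs. PARSER.parseFrom) changes.
// writeDelimitedTo is assumed from the standard MessageLite API.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1;

public class DelimitedParseSketch {
  public static void main(String[] args) throws Exception {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    Mesg1.newBuilder().addIntList(7).build().writeDelimitedTo(out); // length-prefixed write
    Mesg1 back = Mesg1.parseDelimitedFrom(new ByteArrayInputStream(out.toByteArray()));
    System.out.println(back.getIntList(0));                         // 7
  }
}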
com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -1284,16 +1045,14 @@ protected Builder newBuilderForType( * Protobuf type {@code Mesg1} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:Mesg1) - org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1OrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1OrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.internal_static_Mesg1_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.internal_static_Mesg1_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -1306,18 +1065,20 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getAnotherMapFieldBuilder(); getNoMapFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); if (anotherMapBuilder_ == null) { @@ -1327,28 +1088,29 @@ public Builder clear() { anotherMapBuilder_.clear(); } if (noMapBuilder_ == null) { - noMap_ = null; + noMap_ = 
org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.getDefaultInstance(); } else { noMapBuilder_.clear(); } bitField0_ = (bitField0_ & ~0x00000002); - intList_ = emptyIntList(); + intList_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.internal_static_Mesg1_descriptor; } - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 getDefaultInstanceForType() { return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 build() { org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 result = buildPartial(); if (!result.isInitialized()) { @@ -1357,13 +1119,12 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 build() { return result; } - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 buildPartial() { org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 result = new org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; if (anotherMapBuilder_ == null) { - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { anotherMap_ = java.util.Collections.unmodifiableList(anotherMap_); bitField0_ = (bitField0_ & ~0x00000001); } @@ -1371,16 +1132,16 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 buildPartial() { } else { result.anotherMap_ = anotherMapBuilder_.build(); } - if (((from_bitField0_ & 0x00000002) != 0)) { - if (noMapBuilder_ == null) { - result.noMap_ = noMap_; - } else { - result.noMap_ = noMapBuilder_.build(); - } + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000001; } - if (((bitField0_ & 0x00000004) != 0)) { - intList_.makeImmutable(); + if (noMapBuilder_ == null) { + result.noMap_ = noMap_; + } else { + result.noMap_ = noMapBuilder_.build(); + } + if (((bitField0_ & 0x00000004) == 0x00000004)) { + intList_ = java.util.Collections.unmodifiableList(intList_); bitField0_ = (bitField0_ & ~0x00000004); } result.intList_ = intList_; @@ -1389,39 +1150,6 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 buildPartial() { return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message 
other) { if (other instanceof org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1) { return mergeFrom((org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1)other); @@ -1452,7 +1180,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg anotherMap_ = other.anotherMap_; bitField0_ = (bitField0_ & ~0x00000001); anotherMapBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getAnotherMapFieldBuilder() : null; } else { anotherMapBuilder_.addAllMessages(other.anotherMap_); @@ -1472,17 +1200,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg } onChanged(); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -1492,7 +1217,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -1502,16 +1227,17 @@ public Builder mergeFrom( } private int bitField0_; + // repeated .MapFieldEntry anotherMap = 1; private java.util.List anotherMap_ = java.util.Collections.emptyList(); private void ensureAnotherMapIsMutable() { - if (!((bitField0_ & 0x00000001) != 0)) { + if (!((bitField0_ & 0x00000001) == 0x00000001)) { anotherMap_ = new java.util.ArrayList(anotherMap_); bitField0_ |= 0x00000001; } } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.Builder, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder> anotherMapBuilder_; /** @@ -1643,8 +1369,7 @@ public Builder addAllAnotherMap( java.lang.Iterable values) { if (anotherMapBuilder_ == null) { ensureAnotherMapIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, anotherMap_); + super.addAll(values, anotherMap_); onChanged(); } else { anotherMapBuilder_.addAllMessages(values); @@ -1727,14 +1452,14 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.Builder getAnotherMapBuilderList() { return getAnotherMapFieldBuilder().getBuilderList(); } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.Builder, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder> getAnotherMapFieldBuilder() { if (anotherMapBuilder_ == null) { - anotherMapBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + anotherMapBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.Builder, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder>( anotherMap_, - ((bitField0_ & 0x00000001) != 0), + ((bitField0_ 
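// Usage sketch, not part of the patch: populating the repeated anotherMap field
// through the builder methods touched above (addAllAnotherMap); the read-side
// accessors are the ones declared earlier in Mesg1.
import java.util.Arrays;
import org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry;
import org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1;

public class AnotherMapSketch {
  public static void main(String[] args) {
    Mesg1 msg = Mesg1.newBuilder()
        .addAllAnotherMap(Arrays.asList(
            MapFieldEntry.newBuilder().setKey("a").setValue("1").build(),
            MapFieldEntry.newBuilder().setKey("b").setValue("2").build()))
        .build();
    System.out.println(msg.getAnotherMapCount());        // 2
    System.out.println(msg.getAnotherMap(0).getKey());   // a
  }
}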
& 0x00000001) == 0x00000001), getParentForChildren(), isClean()); anotherMap_ = null; @@ -1742,23 +1467,22 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.Builder return anotherMapBuilder_; } - private org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry noMap_; - private com.google.protobuf.SingleFieldBuilderV3< + // optional .MapFieldEntry noMap = 2; + private org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry noMap_ = org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.Builder, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder> noMapBuilder_; /** * optional .MapFieldEntry noMap = 2; - * @return Whether the noMap field is set. */ public boolean hasNoMap() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional .MapFieldEntry noMap = 2; - * @return The noMap. */ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry getNoMap() { if (noMapBuilder_ == null) { - return noMap_ == null ? org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.getDefaultInstance() : noMap_; + return noMap_; } else { return noMapBuilder_.getMessage(); } @@ -1798,8 +1522,7 @@ public Builder setNoMap( */ public Builder mergeNoMap(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry value) { if (noMapBuilder_ == null) { - if (((bitField0_ & 0x00000002) != 0) && - noMap_ != null && + if (((bitField0_ & 0x00000002) == 0x00000002) && noMap_ != org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.getDefaultInstance()) { noMap_ = org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.newBuilder(noMap_).mergeFrom(value).buildPartial(); @@ -1818,7 +1541,7 @@ public Builder mergeNoMap(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Map */ public Builder clearNoMap() { if (noMapBuilder_ == null) { - noMap_ = null; + noMap_ = org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.getDefaultInstance(); onChanged(); } else { noMapBuilder_.clear(); @@ -1841,20 +1564,19 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder if (noMapBuilder_ != null) { return noMapBuilder_.getMessageOrBuilder(); } else { - return noMap_ == null ? 
- org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.getDefaultInstance() : noMap_; + return noMap_; } } /** * optional .MapFieldEntry noMap = 2; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.Builder, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder> getNoMapFieldBuilder() { if (noMapBuilder_ == null) { - noMapBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + noMapBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.Builder, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder>( - getNoMap(), + noMap_, getParentForChildren(), isClean()); noMap_ = null; @@ -1862,312 +1584,242 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder return noMapBuilder_; } - private com.google.protobuf.Internal.IntList intList_ = emptyIntList(); + // repeated int32 intList = 3; + private java.util.List intList_ = java.util.Collections.emptyList(); private void ensureIntListIsMutable() { - if (!((bitField0_ & 0x00000004) != 0)) { - intList_ = mutableCopy(intList_); + if (!((bitField0_ & 0x00000004) == 0x00000004)) { + intList_ = new java.util.ArrayList(intList_); bitField0_ |= 0x00000004; } } /** * repeated int32 intList = 3; - * @return A list containing the intList. */ public java.util.List getIntListList() { - return ((bitField0_ & 0x00000004) != 0) ? - java.util.Collections.unmodifiableList(intList_) : intList_; + return java.util.Collections.unmodifiableList(intList_); } /** * repeated int32 intList = 3; - * @return The count of intList. */ public int getIntListCount() { return intList_.size(); } /** * repeated int32 intList = 3; - * @param index The index of the element to return. - * @return The intList at the given index. */ public int getIntList(int index) { - return intList_.getInt(index); + return intList_.get(index); } /** * repeated int32 intList = 3; - * @param index The index to set the value at. - * @param value The intList to set. - * @return This builder for chaining. */ public Builder setIntList( int index, int value) { ensureIntListIsMutable(); - intList_.setInt(index, value); + intList_.set(index, value); onChanged(); return this; } /** * repeated int32 intList = 3; - * @param value The intList to add. - * @return This builder for chaining. */ public Builder addIntList(int value) { ensureIntListIsMutable(); - intList_.addInt(value); + intList_.add(value); onChanged(); return this; } /** * repeated int32 intList = 3; - * @param values The intList to add. - * @return This builder for chaining. */ public Builder addAllIntList( java.lang.Iterable values) { ensureIntListIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, intList_); + super.addAll(values, intList_); onChanged(); return this; } /** * repeated int32 intList = 3; - * @return This builder for chaining. 
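// Illustrative sketch, not part of the patch: the repeated int32 field is backed
// by com.google.protobuf.Internal.IntList on the "-" side and by a boxed
// java.util.ArrayList on the "+" side, but the builder and message accessors
// shown above expose the same List<Integer> view either way.
import org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1;

public class IntListSketch {
  public static void main(String[] args) {
    Mesg1 msg = Mesg1.newBuilder()
        .addIntList(1)
        .addIntList(2)
        .setIntList(1, 20)                         // overwrite index 1
        .build();
    System.out.println(msg.getIntListCount());     // 2
    System.out.println(msg.getIntListList());      // [1, 20]
  }
}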
*/ public Builder clearIntList() { - intList_ = emptyIntList(); + intList_ = java.util.Collections.emptyList(); bitField0_ = (bitField0_ & ~0x00000004); onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:Mesg1) } - // @@protoc_insertion_point(class_scope:Mesg1) - private static final org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1(); - } - - public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public Mesg1 parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new Mesg1(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new Mesg1(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:Mesg1) } - public interface AllTypesOrBuilder extends - // @@protoc_insertion_point(interface_extends:AllTypes) - com.google.protobuf.MessageOrBuilder { + public interface AllTypesOrBuilder + extends com.google.protobuf.MessageOrBuilder { + // optional double doubleType = 1; /** * optional double doubleType = 1; - * @return Whether the doubleType field is set. */ boolean hasDoubleType(); /** * optional double doubleType = 1; - * @return The doubleType. */ double getDoubleType(); + // optional float floatType = 2; /** * optional float floatType = 2; - * @return Whether the floatType field is set. */ boolean hasFloatType(); /** * optional float floatType = 2; - * @return The floatType. */ float getFloatType(); + // optional int32 int32Type = 3; /** * optional int32 int32Type = 3; - * @return Whether the int32Type field is set. */ boolean hasInt32Type(); /** * optional int32 int32Type = 3; - * @return The int32Type. */ int getInt32Type(); + // optional int64 int64Type = 4; /** * optional int64 int64Type = 4; - * @return Whether the int64Type field is set. */ boolean hasInt64Type(); /** * optional int64 int64Type = 4; - * @return The int64Type. */ long getInt64Type(); + // optional uint32 uint32Type = 5; /** * optional uint32 uint32Type = 5; - * @return Whether the uint32Type field is set. */ boolean hasUint32Type(); /** * optional uint32 uint32Type = 5; - * @return The uint32Type. */ int getUint32Type(); + // optional uint64 uint64Type = 6; /** * optional uint64 uint64Type = 6; - * @return Whether the uint64Type field is set. */ boolean hasUint64Type(); /** * optional uint64 uint64Type = 6; - * @return The uint64Type. 
*/ long getUint64Type(); + // optional sint32 sint32Type = 7; /** * optional sint32 sint32Type = 7; - * @return Whether the sint32Type field is set. */ boolean hasSint32Type(); /** * optional sint32 sint32Type = 7; - * @return The sint32Type. */ int getSint32Type(); + // optional sint64 sint64Type = 8; /** * optional sint64 sint64Type = 8; - * @return Whether the sint64Type field is set. */ boolean hasSint64Type(); /** * optional sint64 sint64Type = 8; - * @return The sint64Type. */ long getSint64Type(); + // optional fixed32 fixed32Type = 9; /** * optional fixed32 fixed32Type = 9; - * @return Whether the fixed32Type field is set. */ boolean hasFixed32Type(); /** * optional fixed32 fixed32Type = 9; - * @return The fixed32Type. */ int getFixed32Type(); + // optional fixed64 fixed64Type = 10; /** * optional fixed64 fixed64Type = 10; - * @return Whether the fixed64Type field is set. */ boolean hasFixed64Type(); /** * optional fixed64 fixed64Type = 10; - * @return The fixed64Type. */ long getFixed64Type(); + // optional sfixed32 sfixed32Type = 11; /** * optional sfixed32 sfixed32Type = 11; - * @return Whether the sfixed32Type field is set. */ boolean hasSfixed32Type(); /** * optional sfixed32 sfixed32Type = 11; - * @return The sfixed32Type. */ int getSfixed32Type(); + // optional sfixed64 sfixed64Type = 12; /** * optional sfixed64 sfixed64Type = 12; - * @return Whether the sfixed64Type field is set. */ boolean hasSfixed64Type(); /** * optional sfixed64 sfixed64Type = 12; - * @return The sfixed64Type. */ long getSfixed64Type(); + // optional bool boolType = 13; /** * optional bool boolType = 13; - * @return Whether the boolType field is set. */ boolean hasBoolType(); /** * optional bool boolType = 13; - * @return The boolType. */ boolean getBoolType(); + // optional string stringType = 14; /** * optional string stringType = 14; - * @return Whether the stringType field is set. */ boolean hasStringType(); /** * optional string stringType = 14; - * @return The stringType. */ java.lang.String getStringType(); /** * optional string stringType = 14; - * @return The bytes for stringType. */ com.google.protobuf.ByteString getStringTypeBytes(); + // optional bytes bytesType = 15; /** * optional bytes bytesType = 15; - * @return Whether the bytesType field is set. */ boolean hasBytesType(); /** * optional bytes bytesType = 15; - * @return The bytesType. */ com.google.protobuf.ByteString getBytesType(); + // repeated .MapFieldEntry mapType = 16; /** * repeated .MapFieldEntry mapType = 16; */ @@ -2192,39 +1844,33 @@ public interface AllTypesOrBuilder extends org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder getMapTypeOrBuilder( int index); + // repeated string stringListType = 17; /** * repeated string stringListType = 17; - * @return A list containing the stringListType. */ java.util.List - getStringListTypeList(); + getStringListTypeList(); /** * repeated string stringListType = 17; - * @return The count of stringListType. */ int getStringListTypeCount(); /** * repeated string stringListType = 17; - * @param index The index of the element to return. - * @return The stringListType at the given index. */ java.lang.String getStringListType(int index); /** * repeated string stringListType = 17; - * @param index The index of the value to return. - * @return The bytes of the stringListType at the given index. 
*/ com.google.protobuf.ByteString getStringListTypeBytes(int index); + // optional .Mesg1 messageType = 18; /** * optional .Mesg1 messageType = 18; - * @return Whether the messageType field is set. */ boolean hasMessageType(); /** * optional .Mesg1 messageType = 18; - * @return The messageType. */ org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 getMessageType(); /** @@ -2232,6 +1878,7 @@ org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder getMap */ org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1OrBuilder getMessageTypeOrBuilder(); + // repeated .Mesg1 messageListType = 19; /** * repeated .Mesg1 messageListType = 19; */ @@ -2256,14 +1903,13 @@ org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder getMap org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1OrBuilder getMessageListTypeOrBuilder( int index); + // optional .AllTypes.Enum1 enumType = 20; /** * optional .AllTypes.Enum1 enumType = 20; - * @return Whether the enumType field is set. */ boolean hasEnumType(); /** * optional .AllTypes.Enum1 enumType = 20; - * @return The enumType. */ org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1 getEnumType(); } @@ -2271,43 +1917,35 @@ org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1OrBuilder getMessageList * Protobuf type {@code AllTypes} */ public static final class AllTypes extends - com.google.protobuf.GeneratedMessageV3 implements - // @@protoc_insertion_point(message_implements:AllTypes) - AllTypesOrBuilder { - private static final long serialVersionUID = 0L; + com.google.protobuf.GeneratedMessage + implements AllTypesOrBuilder { // Use AllTypes.newBuilder() to construct. - private AllTypes(com.google.protobuf.GeneratedMessageV3.Builder builder) { + private AllTypes(com.google.protobuf.GeneratedMessage.Builder builder) { super(builder); + this.unknownFields = builder.getUnknownFields(); } - private AllTypes() { - stringType_ = ""; - bytesType_ = com.google.protobuf.ByteString.EMPTY; - mapType_ = java.util.Collections.emptyList(); - stringListType_ = com.google.protobuf.LazyStringArrayList.EMPTY; - messageListType_ = java.util.Collections.emptyList(); - enumType_ = 1; + private AllTypes(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); } + + private static final AllTypes defaultInstance; + public static AllTypes getDefaultInstance() { + return defaultInstance; } - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance( - UnusedPrivateParameter unused) { - return new AllTypes(); + public AllTypes getDefaultInstanceForType() { + return defaultInstance; } + private final com.google.protobuf.UnknownFieldSet unknownFields; @java.lang.Override public final com.google.protobuf.UnknownFieldSet - getUnknownFields() { + getUnknownFields() { return this.unknownFields; } private AllTypes( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } + initFields(); int mutable_bitField0_ = 0; com.google.protobuf.UnknownFieldSet.Builder unknownFields = com.google.protobuf.UnknownFieldSet.newBuilder(); @@ -2319,6 +1957,13 @@ private AllTypes( case 0: done = true; break; + default: { + if (!parseUnknownField(input, unknownFields, + extensionRegistry, tag)) { + done = true; + } + break; + } case 9: { bitField0_ |= 0x00000001; doubleType_ = 
input.readDouble(); @@ -2385,9 +2030,8 @@ private AllTypes( break; } case 114: { - com.google.protobuf.ByteString bs = input.readBytes(); bitField0_ |= 0x00002000; - stringType_ = bs; + stringType_ = input.readBytes(); break; } case 122: { @@ -2396,26 +2040,24 @@ private AllTypes( break; } case 130: { - if (!((mutable_bitField0_ & 0x00008000) != 0)) { + if (!((mutable_bitField0_ & 0x00008000) == 0x00008000)) { mapType_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00008000; } - mapType_.add( - input.readMessage(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.PARSER, extensionRegistry)); + mapType_.add(input.readMessage(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.PARSER, extensionRegistry)); break; } case 138: { - com.google.protobuf.ByteString bs = input.readBytes(); - if (!((mutable_bitField0_ & 0x00010000) != 0)) { + if (!((mutable_bitField0_ & 0x00010000) == 0x00010000)) { stringListType_ = new com.google.protobuf.LazyStringArrayList(); mutable_bitField0_ |= 0x00010000; } - stringListType_.add(bs); + stringListType_.add(input.readBytes()); break; } case 146: { org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.Builder subBuilder = null; - if (((bitField0_ & 0x00008000) != 0)) { + if (((bitField0_ & 0x00008000) == 0x00008000)) { subBuilder = messageType_.toBuilder(); } messageType_ = input.readMessage(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.PARSER, extensionRegistry); @@ -2427,30 +2069,21 @@ private AllTypes( break; } case 154: { - if (!((mutable_bitField0_ & 0x00040000) != 0)) { + if (!((mutable_bitField0_ & 0x00040000) == 0x00040000)) { messageListType_ = new java.util.ArrayList(); mutable_bitField0_ |= 0x00040000; } - messageListType_.add( - input.readMessage(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.PARSER, extensionRegistry)); + messageListType_.add(input.readMessage(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.PARSER, extensionRegistry)); break; } case 160: { int rawValue = input.readEnum(); - @SuppressWarnings("deprecation") org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1 value = org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1.valueOf(rawValue); if (value == null) { unknownFields.mergeVarintField(20, rawValue); } else { bitField0_ |= 0x00010000; - enumType_ = rawValue; - } - break; - } - default: { - if (!parseUnknownField( - input, unknownFields, extensionRegistry, tag)) { - done = true; + enumType_ = value; } break; } @@ -2458,19 +2091,17 @@ private AllTypes( } } catch (com.google.protobuf.InvalidProtocolBufferException e) { throw e.setUnfinishedMessage(this); - } catch (com.google.protobuf.UninitializedMessageException e) { - throw e.asInvalidProtocolBufferException().setUnfinishedMessage(this); } catch (java.io.IOException e) { throw new com.google.protobuf.InvalidProtocolBufferException( - e).setUnfinishedMessage(this); + e.getMessage()).setUnfinishedMessage(this); } finally { - if (((mutable_bitField0_ & 0x00008000) != 0)) { + if (((mutable_bitField0_ & 0x00008000) == 0x00008000)) { mapType_ = java.util.Collections.unmodifiableList(mapType_); } - if (((mutable_bitField0_ & 0x00010000) != 0)) { - stringListType_ = stringListType_.getUnmodifiableView(); + if (((mutable_bitField0_ & 0x00010000) == 0x00010000)) { + stringListType_ = new com.google.protobuf.UnmodifiableLazyStringList(stringListType_); } - if (((mutable_bitField0_ & 0x00040000) != 0)) { + if (((mutable_bitField0_ & 0x00040000) == 0x00040000)) { messageListType_ = 
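// Illustrative sketch, not part of the patch: the case labels in the parsing loop
// above are protobuf wire tags, computed as (fieldNumber << 3) | wireType. For
// example stringType (field 14, length-delimited wire type 2) is handled under
// case 114, and enumType (field 20, varint wire type 0) under case 160.
public class WireTagSketch {
  static int tag(int fieldNumber, int wireType) {
    return (fieldNumber << 3) | wireType;
  }

  public static void main(String[] args) {
    System.out.println(tag(1, 1));    // 9   -> doubleType, fixed64
    System.out.println(tag(14, 2));   // 114 -> stringType, length-delimited
    System.out.println(tag(16, 2));   // 130 -> mapType, length-delimited
    System.out.println(tag(20, 0));   // 160 -> enumType, varint
  }
}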
java.util.Collections.unmodifiableList(messageListType_); } this.unknownFields = unknownFields.build(); @@ -2482,14 +2113,28 @@ private AllTypes( return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.internal_static_AllTypes_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.internal_static_AllTypes_fieldAccessorTable .ensureFieldAccessorsInitialized( org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.class, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Builder.class); } + public static com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + public AllTypes parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return new AllTypes(input, extensionRegistry); + } + }; + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + /** * Protobuf enum {@code AllTypes.Enum1} */ @@ -2498,11 +2143,11 @@ public enum Enum1 /** * VAL1 = 1; */ - VAL1(1), + VAL1(0, 1), /** * VAL2 = 2; */ - VAL2(2), + VAL2(1, 2), ; /** @@ -2515,25 +2160,9 @@ public enum Enum1 public static final int VAL2_VALUE = 2; - public final int getNumber() { - return value; - } + public final int getNumber() { return value; } - /** - * @param value The numeric wire value of the corresponding enum entry. - * @return The enum associated with the given numeric wire value. - * @deprecated Use {@link #forNumber(int)} instead. - */ - @java.lang.Deprecated public static Enum1 valueOf(int value) { - return forNumber(value); - } - - /** - * @param value The numeric wire value of the corresponding enum entry. - * @return The enum associated with the given numeric wire value. - */ - public static Enum1 forNumber(int value) { switch (value) { case 1: return VAL1; case 2: return VAL2; @@ -2545,17 +2174,17 @@ public static Enum1 forNumber(int value) { internalGetValueMap() { return internalValueMap; } - private static final com.google.protobuf.Internal.EnumLiteMap< - Enum1> internalValueMap = + private static com.google.protobuf.Internal.EnumLiteMap + internalValueMap = new com.google.protobuf.Internal.EnumLiteMap() { public Enum1 findValueByNumber(int number) { - return Enum1.forNumber(number); + return Enum1.valueOf(number); } }; public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { - return getDescriptor().getValues().get(ordinal()); + return getDescriptor().getValues().get(index); } public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { @@ -2577,9 +2206,11 @@ public static Enum1 valueOf( return VALUES[desc.getIndex()]; } + private final int index; private final int value; - private Enum1(int value) { + private Enum1(int index, int value) { + this.index = index; this.value = value; } @@ -2587,268 +2218,226 @@ private Enum1(int value) { } private int bitField0_; + // optional double doubleType = 1; public static final int DOUBLETYPE_FIELD_NUMBER = 1; private double doubleType_; /** * optional double doubleType = 1; - * @return Whether the doubleType field is set. 
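// Illustrative sketch, not part of the patch: the regenerated Enum1 keeps an
// explicit (index, value) constructor and the classic valueOf(int) lookup instead
// of the forNumber(int) method removed above; both variants expose getNumber()
// and return null for unknown wire numbers.
import org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1;

public class Enum1Sketch {
  public static void main(String[] args) {
    System.out.println(Enum1.VAL2.getNumber());   // 2
    System.out.println(Enum1.valueOf(1));         // VAL1
    System.out.println(Enum1.valueOf(99));        // null for an unknown number
  }
}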
*/ - @java.lang.Override public boolean hasDoubleType() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional double doubleType = 1; - * @return The doubleType. */ - @java.lang.Override public double getDoubleType() { return doubleType_; } + // optional float floatType = 2; public static final int FLOATTYPE_FIELD_NUMBER = 2; private float floatType_; /** * optional float floatType = 2; - * @return Whether the floatType field is set. */ - @java.lang.Override public boolean hasFloatType() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional float floatType = 2; - * @return The floatType. */ - @java.lang.Override public float getFloatType() { return floatType_; } + // optional int32 int32Type = 3; public static final int INT32TYPE_FIELD_NUMBER = 3; private int int32Type_; /** * optional int32 int32Type = 3; - * @return Whether the int32Type field is set. */ - @java.lang.Override public boolean hasInt32Type() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional int32 int32Type = 3; - * @return The int32Type. */ - @java.lang.Override public int getInt32Type() { return int32Type_; } + // optional int64 int64Type = 4; public static final int INT64TYPE_FIELD_NUMBER = 4; private long int64Type_; /** * optional int64 int64Type = 4; - * @return Whether the int64Type field is set. */ - @java.lang.Override public boolean hasInt64Type() { - return ((bitField0_ & 0x00000008) != 0); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional int64 int64Type = 4; - * @return The int64Type. */ - @java.lang.Override public long getInt64Type() { return int64Type_; } + // optional uint32 uint32Type = 5; public static final int UINT32TYPE_FIELD_NUMBER = 5; private int uint32Type_; /** * optional uint32 uint32Type = 5; - * @return Whether the uint32Type field is set. */ - @java.lang.Override public boolean hasUint32Type() { - return ((bitField0_ & 0x00000010) != 0); + return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional uint32 uint32Type = 5; - * @return The uint32Type. */ - @java.lang.Override public int getUint32Type() { return uint32Type_; } + // optional uint64 uint64Type = 6; public static final int UINT64TYPE_FIELD_NUMBER = 6; private long uint64Type_; /** * optional uint64 uint64Type = 6; - * @return Whether the uint64Type field is set. */ - @java.lang.Override public boolean hasUint64Type() { - return ((bitField0_ & 0x00000020) != 0); + return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional uint64 uint64Type = 6; - * @return The uint64Type. */ - @java.lang.Override public long getUint64Type() { return uint64Type_; } + // optional sint32 sint32Type = 7; public static final int SINT32TYPE_FIELD_NUMBER = 7; private int sint32Type_; /** * optional sint32 sint32Type = 7; - * @return Whether the sint32Type field is set. */ - @java.lang.Override public boolean hasSint32Type() { - return ((bitField0_ & 0x00000040) != 0); + return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional sint32 sint32Type = 7; - * @return The sint32Type. */ - @java.lang.Override public int getSint32Type() { return sint32Type_; } + // optional sint64 sint64Type = 8; public static final int SINT64TYPE_FIELD_NUMBER = 8; private long sint64Type_; /** * optional sint64 sint64Type = 8; - * @return Whether the sint64Type field is set. 
*/ - @java.lang.Override public boolean hasSint64Type() { - return ((bitField0_ & 0x00000080) != 0); + return ((bitField0_ & 0x00000080) == 0x00000080); } /** * optional sint64 sint64Type = 8; - * @return The sint64Type. */ - @java.lang.Override public long getSint64Type() { return sint64Type_; } + // optional fixed32 fixed32Type = 9; public static final int FIXED32TYPE_FIELD_NUMBER = 9; private int fixed32Type_; /** * optional fixed32 fixed32Type = 9; - * @return Whether the fixed32Type field is set. */ - @java.lang.Override public boolean hasFixed32Type() { - return ((bitField0_ & 0x00000100) != 0); + return ((bitField0_ & 0x00000100) == 0x00000100); } /** * optional fixed32 fixed32Type = 9; - * @return The fixed32Type. */ - @java.lang.Override public int getFixed32Type() { return fixed32Type_; } + // optional fixed64 fixed64Type = 10; public static final int FIXED64TYPE_FIELD_NUMBER = 10; private long fixed64Type_; /** * optional fixed64 fixed64Type = 10; - * @return Whether the fixed64Type field is set. */ - @java.lang.Override public boolean hasFixed64Type() { - return ((bitField0_ & 0x00000200) != 0); + return ((bitField0_ & 0x00000200) == 0x00000200); } /** * optional fixed64 fixed64Type = 10; - * @return The fixed64Type. */ - @java.lang.Override public long getFixed64Type() { return fixed64Type_; } + // optional sfixed32 sfixed32Type = 11; public static final int SFIXED32TYPE_FIELD_NUMBER = 11; private int sfixed32Type_; /** * optional sfixed32 sfixed32Type = 11; - * @return Whether the sfixed32Type field is set. */ - @java.lang.Override public boolean hasSfixed32Type() { - return ((bitField0_ & 0x00000400) != 0); + return ((bitField0_ & 0x00000400) == 0x00000400); } /** * optional sfixed32 sfixed32Type = 11; - * @return The sfixed32Type. */ - @java.lang.Override public int getSfixed32Type() { return sfixed32Type_; } + // optional sfixed64 sfixed64Type = 12; public static final int SFIXED64TYPE_FIELD_NUMBER = 12; private long sfixed64Type_; /** * optional sfixed64 sfixed64Type = 12; - * @return Whether the sfixed64Type field is set. */ - @java.lang.Override public boolean hasSfixed64Type() { - return ((bitField0_ & 0x00000800) != 0); + return ((bitField0_ & 0x00000800) == 0x00000800); } /** * optional sfixed64 sfixed64Type = 12; - * @return The sfixed64Type. */ - @java.lang.Override public long getSfixed64Type() { return sfixed64Type_; } + // optional bool boolType = 13; public static final int BOOLTYPE_FIELD_NUMBER = 13; private boolean boolType_; /** * optional bool boolType = 13; - * @return Whether the boolType field is set. */ - @java.lang.Override public boolean hasBoolType() { - return ((bitField0_ & 0x00001000) != 0); + return ((bitField0_ & 0x00001000) == 0x00001000); } /** * optional bool boolType = 13; - * @return The boolType. */ - @java.lang.Override public boolean getBoolType() { return boolType_; } + // optional string stringType = 14; public static final int STRINGTYPE_FIELD_NUMBER = 14; - private volatile java.lang.Object stringType_; + private java.lang.Object stringType_; /** * optional string stringType = 14; - * @return Whether the stringType field is set. */ - @java.lang.Override public boolean hasStringType() { - return ((bitField0_ & 0x00002000) != 0); + return ((bitField0_ & 0x00002000) == 0x00002000); } /** * optional string stringType = 14; - * @return The stringType. 
*/ - @java.lang.Override public java.lang.String getStringType() { java.lang.Object ref = stringType_; if (ref instanceof java.lang.String) { @@ -2865,9 +2454,7 @@ public java.lang.String getStringType() { } /** * optional string stringType = 14; - * @return The bytes for stringType. */ - @java.lang.Override public com.google.protobuf.ByteString getStringTypeBytes() { java.lang.Object ref = stringType_; @@ -2882,38 +2469,34 @@ public java.lang.String getStringType() { } } + // optional bytes bytesType = 15; public static final int BYTESTYPE_FIELD_NUMBER = 15; private com.google.protobuf.ByteString bytesType_; /** * optional bytes bytesType = 15; - * @return Whether the bytesType field is set. */ - @java.lang.Override public boolean hasBytesType() { - return ((bitField0_ & 0x00004000) != 0); + return ((bitField0_ & 0x00004000) == 0x00004000); } /** * optional bytes bytesType = 15; - * @return The bytesType. */ - @java.lang.Override public com.google.protobuf.ByteString getBytesType() { return bytesType_; } + // repeated .MapFieldEntry mapType = 16; public static final int MAPTYPE_FIELD_NUMBER = 16; private java.util.List mapType_; /** * repeated .MapFieldEntry mapType = 16; */ - @java.lang.Override public java.util.List getMapTypeList() { return mapType_; } /** * repeated .MapFieldEntry mapType = 16; */ - @java.lang.Override public java.util.List getMapTypeOrBuilderList() { return mapType_; @@ -2921,100 +2504,87 @@ public java.util.Listrepeated .MapFieldEntry mapType = 16; */ - @java.lang.Override public int getMapTypeCount() { return mapType_.size(); } /** * repeated .MapFieldEntry mapType = 16; */ - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry getMapType(int index) { return mapType_.get(index); } /** * repeated .MapFieldEntry mapType = 16; */ - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder getMapTypeOrBuilder( int index) { return mapType_.get(index); } + // repeated string stringListType = 17; public static final int STRINGLISTTYPE_FIELD_NUMBER = 17; private com.google.protobuf.LazyStringList stringListType_; /** * repeated string stringListType = 17; - * @return A list containing the stringListType. */ - public com.google.protobuf.ProtocolStringList + public java.util.List getStringListTypeList() { return stringListType_; } /** * repeated string stringListType = 17; - * @return The count of stringListType. */ public int getStringListTypeCount() { return stringListType_.size(); } /** * repeated string stringListType = 17; - * @param index The index of the element to return. - * @return The stringListType at the given index. */ public java.lang.String getStringListType(int index) { return stringListType_.get(index); } /** * repeated string stringListType = 17; - * @param index The index of the value to return. - * @return The bytes of the stringListType at the given index. */ public com.google.protobuf.ByteString getStringListTypeBytes(int index) { return stringListType_.getByteString(index); } + // optional .Mesg1 messageType = 18; public static final int MESSAGETYPE_FIELD_NUMBER = 18; private org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 messageType_; /** * optional .Mesg1 messageType = 18; - * @return Whether the messageType field is set. */ - @java.lang.Override public boolean hasMessageType() { - return ((bitField0_ & 0x00008000) != 0); + return ((bitField0_ & 0x00008000) == 0x00008000); } /** * optional .Mesg1 messageType = 18; - * @return The messageType. 
*/ - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 getMessageType() { - return messageType_ == null ? org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.getDefaultInstance() : messageType_; + return messageType_; } /** * optional .Mesg1 messageType = 18; */ - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1OrBuilder getMessageTypeOrBuilder() { - return messageType_ == null ? org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.getDefaultInstance() : messageType_; + return messageType_; } + // repeated .Mesg1 messageListType = 19; public static final int MESSAGELISTTYPE_FIELD_NUMBER = 19; private java.util.List messageListType_; /** * repeated .Mesg1 messageListType = 19; */ - @java.lang.Override public java.util.List getMessageListTypeList() { return messageListType_; } /** * repeated .Mesg1 messageListType = 19; */ - @java.lang.Override public java.util.List getMessageListTypeOrBuilderList() { return messageListType_; @@ -3022,184 +2592,199 @@ public java.util.List /** * repeated .Mesg1 messageListType = 19; */ - @java.lang.Override public int getMessageListTypeCount() { return messageListType_.size(); } /** * repeated .Mesg1 messageListType = 19; */ - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 getMessageListType(int index) { return messageListType_.get(index); } /** * repeated .Mesg1 messageListType = 19; */ - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1OrBuilder getMessageListTypeOrBuilder( int index) { return messageListType_.get(index); } + // optional .AllTypes.Enum1 enumType = 20; public static final int ENUMTYPE_FIELD_NUMBER = 20; - private int enumType_; + private org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1 enumType_; /** * optional .AllTypes.Enum1 enumType = 20; - * @return Whether the enumType field is set. */ - @java.lang.Override public boolean hasEnumType() { - return ((bitField0_ & 0x00010000) != 0); + public boolean hasEnumType() { + return ((bitField0_ & 0x00010000) == 0x00010000); } /** * optional .AllTypes.Enum1 enumType = 20; - * @return The enumType. */ - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1 getEnumType() { - @SuppressWarnings("deprecation") - org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1 result = org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1.valueOf(enumType_); - return result == null ? 
org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1.VAL1 : result; + public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1 getEnumType() { + return enumType_; + } + + private void initFields() { + doubleType_ = 0D; + floatType_ = 0F; + int32Type_ = 0; + int64Type_ = 0L; + uint32Type_ = 0; + uint64Type_ = 0L; + sint32Type_ = 0; + sint64Type_ = 0L; + fixed32Type_ = 0; + fixed64Type_ = 0L; + sfixed32Type_ = 0; + sfixed64Type_ = 0L; + boolType_ = false; + stringType_ = ""; + bytesType_ = com.google.protobuf.ByteString.EMPTY; + mapType_ = java.util.Collections.emptyList(); + stringListType_ = com.google.protobuf.LazyStringArrayList.EMPTY; + messageType_ = org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.getDefaultInstance(); + messageListType_ = java.util.Collections.emptyList(); + enumType_ = org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1.VAL1; } - private byte memoizedIsInitialized = -1; - @java.lang.Override public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; + if (isInitialized != -1) return isInitialized == 1; memoizedIsInitialized = 1; return true; } - @java.lang.Override public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (((bitField0_ & 0x00000001) != 0)) { + getSerializedSize(); + if (((bitField0_ & 0x00000001) == 0x00000001)) { output.writeDouble(1, doubleType_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { output.writeFloat(2, floatType_); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { output.writeInt32(3, int32Type_); } - if (((bitField0_ & 0x00000008) != 0)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { output.writeInt64(4, int64Type_); } - if (((bitField0_ & 0x00000010) != 0)) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { output.writeUInt32(5, uint32Type_); } - if (((bitField0_ & 0x00000020) != 0)) { + if (((bitField0_ & 0x00000020) == 0x00000020)) { output.writeUInt64(6, uint64Type_); } - if (((bitField0_ & 0x00000040) != 0)) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { output.writeSInt32(7, sint32Type_); } - if (((bitField0_ & 0x00000080) != 0)) { + if (((bitField0_ & 0x00000080) == 0x00000080)) { output.writeSInt64(8, sint64Type_); } - if (((bitField0_ & 0x00000100) != 0)) { + if (((bitField0_ & 0x00000100) == 0x00000100)) { output.writeFixed32(9, fixed32Type_); } - if (((bitField0_ & 0x00000200) != 0)) { + if (((bitField0_ & 0x00000200) == 0x00000200)) { output.writeFixed64(10, fixed64Type_); } - if (((bitField0_ & 0x00000400) != 0)) { + if (((bitField0_ & 0x00000400) == 0x00000400)) { output.writeSFixed32(11, sfixed32Type_); } - if (((bitField0_ & 0x00000800) != 0)) { + if (((bitField0_ & 0x00000800) == 0x00000800)) { output.writeSFixed64(12, sfixed64Type_); } - if (((bitField0_ & 0x00001000) != 0)) { + if (((bitField0_ & 0x00001000) == 0x00001000)) { output.writeBool(13, boolType_); } - if (((bitField0_ & 0x00002000) != 0)) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 14, stringType_); + if (((bitField0_ & 0x00002000) == 0x00002000)) { + output.writeBytes(14, getStringTypeBytes()); } - if (((bitField0_ & 0x00004000) != 0)) { + if (((bitField0_ & 0x00004000) == 0x00004000)) { output.writeBytes(15, bytesType_); } for (int i = 0; i < mapType_.size(); i++) { output.writeMessage(16, mapType_.get(i)); } for (int i = 0; i < 
stringListType_.size(); i++) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 17, stringListType_.getRaw(i)); + output.writeBytes(17, stringListType_.getByteString(i)); } - if (((bitField0_ & 0x00008000) != 0)) { - output.writeMessage(18, getMessageType()); + if (((bitField0_ & 0x00008000) == 0x00008000)) { + output.writeMessage(18, messageType_); } for (int i = 0; i < messageListType_.size(); i++) { output.writeMessage(19, messageListType_.get(i)); } - if (((bitField0_ & 0x00010000) != 0)) { - output.writeEnum(20, enumType_); + if (((bitField0_ & 0x00010000) == 0x00010000)) { + output.writeEnum(20, enumType_.getNumber()); } - unknownFields.writeTo(output); + getUnknownFields().writeTo(output); } - @java.lang.Override + private int memoizedSerializedSize = -1; public int getSerializedSize() { - int size = memoizedSize; + int size = memoizedSerializedSize; if (size != -1) return size; size = 0; - if (((bitField0_ & 0x00000001) != 0)) { + if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream .computeDoubleSize(1, doubleType_); } - if (((bitField0_ & 0x00000002) != 0)) { + if (((bitField0_ & 0x00000002) == 0x00000002)) { size += com.google.protobuf.CodedOutputStream .computeFloatSize(2, floatType_); } - if (((bitField0_ & 0x00000004) != 0)) { + if (((bitField0_ & 0x00000004) == 0x00000004)) { size += com.google.protobuf.CodedOutputStream .computeInt32Size(3, int32Type_); } - if (((bitField0_ & 0x00000008) != 0)) { + if (((bitField0_ & 0x00000008) == 0x00000008)) { size += com.google.protobuf.CodedOutputStream .computeInt64Size(4, int64Type_); } - if (((bitField0_ & 0x00000010) != 0)) { + if (((bitField0_ & 0x00000010) == 0x00000010)) { size += com.google.protobuf.CodedOutputStream .computeUInt32Size(5, uint32Type_); } - if (((bitField0_ & 0x00000020) != 0)) { + if (((bitField0_ & 0x00000020) == 0x00000020)) { size += com.google.protobuf.CodedOutputStream .computeUInt64Size(6, uint64Type_); } - if (((bitField0_ & 0x00000040) != 0)) { + if (((bitField0_ & 0x00000040) == 0x00000040)) { size += com.google.protobuf.CodedOutputStream .computeSInt32Size(7, sint32Type_); } - if (((bitField0_ & 0x00000080) != 0)) { + if (((bitField0_ & 0x00000080) == 0x00000080)) { size += com.google.protobuf.CodedOutputStream .computeSInt64Size(8, sint64Type_); } - if (((bitField0_ & 0x00000100) != 0)) { + if (((bitField0_ & 0x00000100) == 0x00000100)) { size += com.google.protobuf.CodedOutputStream .computeFixed32Size(9, fixed32Type_); } - if (((bitField0_ & 0x00000200) != 0)) { + if (((bitField0_ & 0x00000200) == 0x00000200)) { size += com.google.protobuf.CodedOutputStream .computeFixed64Size(10, fixed64Type_); } - if (((bitField0_ & 0x00000400) != 0)) { + if (((bitField0_ & 0x00000400) == 0x00000400)) { size += com.google.protobuf.CodedOutputStream .computeSFixed32Size(11, sfixed32Type_); } - if (((bitField0_ & 0x00000800) != 0)) { + if (((bitField0_ & 0x00000800) == 0x00000800)) { size += com.google.protobuf.CodedOutputStream .computeSFixed64Size(12, sfixed64Type_); } - if (((bitField0_ & 0x00001000) != 0)) { + if (((bitField0_ & 0x00001000) == 0x00001000)) { size += com.google.protobuf.CodedOutputStream .computeBoolSize(13, boolType_); } - if (((bitField0_ & 0x00002000) != 0)) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(14, stringType_); + if (((bitField0_ & 0x00002000) == 0x00002000)) { + size += com.google.protobuf.CodedOutputStream + .computeBytesSize(14, getStringTypeBytes()); } - if (((bitField0_ & 0x00004000) != 0)) { + if 
(((bitField0_ & 0x00004000) == 0x00004000)) { size += com.google.protobuf.CodedOutputStream .computeBytesSize(15, bytesType_); } @@ -3210,245 +2795,36 @@ public int getSerializedSize() { { int dataSize = 0; for (int i = 0; i < stringListType_.size(); i++) { - dataSize += computeStringSizeNoTag(stringListType_.getRaw(i)); + dataSize += com.google.protobuf.CodedOutputStream + .computeBytesSizeNoTag(stringListType_.getByteString(i)); } size += dataSize; size += 2 * getStringListTypeList().size(); } - if (((bitField0_ & 0x00008000) != 0)) { + if (((bitField0_ & 0x00008000) == 0x00008000)) { size += com.google.protobuf.CodedOutputStream - .computeMessageSize(18, getMessageType()); + .computeMessageSize(18, messageType_); } for (int i = 0; i < messageListType_.size(); i++) { size += com.google.protobuf.CodedOutputStream .computeMessageSize(19, messageListType_.get(i)); } - if (((bitField0_ & 0x00010000) != 0)) { + if (((bitField0_ & 0x00010000) == 0x00010000)) { size += com.google.protobuf.CodedOutputStream - .computeEnumSize(20, enumType_); + .computeEnumSize(20, enumType_.getNumber()); } - size += unknownFields.getSerializedSize(); - memoizedSize = size; + size += getUnknownFields().getSerializedSize(); + memoizedSerializedSize = size; return size; } + private static final long serialVersionUID = 0L; @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes)) { - return super.equals(obj); - } - org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes other = (org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes) obj; - - if (hasDoubleType() != other.hasDoubleType()) return false; - if (hasDoubleType()) { - if (java.lang.Double.doubleToLongBits(getDoubleType()) - != java.lang.Double.doubleToLongBits( - other.getDoubleType())) return false; - } - if (hasFloatType() != other.hasFloatType()) return false; - if (hasFloatType()) { - if (java.lang.Float.floatToIntBits(getFloatType()) - != java.lang.Float.floatToIntBits( - other.getFloatType())) return false; - } - if (hasInt32Type() != other.hasInt32Type()) return false; - if (hasInt32Type()) { - if (getInt32Type() - != other.getInt32Type()) return false; - } - if (hasInt64Type() != other.hasInt64Type()) return false; - if (hasInt64Type()) { - if (getInt64Type() - != other.getInt64Type()) return false; - } - if (hasUint32Type() != other.hasUint32Type()) return false; - if (hasUint32Type()) { - if (getUint32Type() - != other.getUint32Type()) return false; - } - if (hasUint64Type() != other.hasUint64Type()) return false; - if (hasUint64Type()) { - if (getUint64Type() - != other.getUint64Type()) return false; - } - if (hasSint32Type() != other.hasSint32Type()) return false; - if (hasSint32Type()) { - if (getSint32Type() - != other.getSint32Type()) return false; - } - if (hasSint64Type() != other.hasSint64Type()) return false; - if (hasSint64Type()) { - if (getSint64Type() - != other.getSint64Type()) return false; - } - if (hasFixed32Type() != other.hasFixed32Type()) return false; - if (hasFixed32Type()) { - if (getFixed32Type() - != other.getFixed32Type()) return false; - } - if (hasFixed64Type() != other.hasFixed64Type()) return false; - if (hasFixed64Type()) { - if (getFixed64Type() - != other.getFixed64Type()) return false; - } - if (hasSfixed32Type() != other.hasSfixed32Type()) return false; - if (hasSfixed32Type()) { - if (getSfixed32Type() - != other.getSfixed32Type()) return false; - } - if 
(hasSfixed64Type() != other.hasSfixed64Type()) return false; - if (hasSfixed64Type()) { - if (getSfixed64Type() - != other.getSfixed64Type()) return false; - } - if (hasBoolType() != other.hasBoolType()) return false; - if (hasBoolType()) { - if (getBoolType() - != other.getBoolType()) return false; - } - if (hasStringType() != other.hasStringType()) return false; - if (hasStringType()) { - if (!getStringType() - .equals(other.getStringType())) return false; - } - if (hasBytesType() != other.hasBytesType()) return false; - if (hasBytesType()) { - if (!getBytesType() - .equals(other.getBytesType())) return false; - } - if (!getMapTypeList() - .equals(other.getMapTypeList())) return false; - if (!getStringListTypeList() - .equals(other.getStringListTypeList())) return false; - if (hasMessageType() != other.hasMessageType()) return false; - if (hasMessageType()) { - if (!getMessageType() - .equals(other.getMessageType())) return false; - } - if (!getMessageListTypeList() - .equals(other.getMessageListTypeList())) return false; - if (hasEnumType() != other.hasEnumType()) return false; - if (hasEnumType()) { - if (enumType_ != other.enumType_) return false; - } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; + protected java.lang.Object writeReplace() + throws java.io.ObjectStreamException { + return super.writeReplace(); } - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasDoubleType()) { - hash = (37 * hash) + DOUBLETYPE_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - java.lang.Double.doubleToLongBits(getDoubleType())); - } - if (hasFloatType()) { - hash = (37 * hash) + FLOATTYPE_FIELD_NUMBER; - hash = (53 * hash) + java.lang.Float.floatToIntBits( - getFloatType()); - } - if (hasInt32Type()) { - hash = (37 * hash) + INT32TYPE_FIELD_NUMBER; - hash = (53 * hash) + getInt32Type(); - } - if (hasInt64Type()) { - hash = (37 * hash) + INT64TYPE_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getInt64Type()); - } - if (hasUint32Type()) { - hash = (37 * hash) + UINT32TYPE_FIELD_NUMBER; - hash = (53 * hash) + getUint32Type(); - } - if (hasUint64Type()) { - hash = (37 * hash) + UINT64TYPE_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getUint64Type()); - } - if (hasSint32Type()) { - hash = (37 * hash) + SINT32TYPE_FIELD_NUMBER; - hash = (53 * hash) + getSint32Type(); - } - if (hasSint64Type()) { - hash = (37 * hash) + SINT64TYPE_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getSint64Type()); - } - if (hasFixed32Type()) { - hash = (37 * hash) + FIXED32TYPE_FIELD_NUMBER; - hash = (53 * hash) + getFixed32Type(); - } - if (hasFixed64Type()) { - hash = (37 * hash) + FIXED64TYPE_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getFixed64Type()); - } - if (hasSfixed32Type()) { - hash = (37 * hash) + SFIXED32TYPE_FIELD_NUMBER; - hash = (53 * hash) + getSfixed32Type(); - } - if (hasSfixed64Type()) { - hash = (37 * hash) + SFIXED64TYPE_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong( - getSfixed64Type()); - } - if (hasBoolType()) { - hash = (37 * hash) + BOOLTYPE_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean( - getBoolType()); - } - if (hasStringType()) { - hash = (37 * hash) + STRINGTYPE_FIELD_NUMBER; - hash = (53 * hash) + 
getStringType().hashCode(); - } - if (hasBytesType()) { - hash = (37 * hash) + BYTESTYPE_FIELD_NUMBER; - hash = (53 * hash) + getBytesType().hashCode(); - } - if (getMapTypeCount() > 0) { - hash = (37 * hash) + MAPTYPE_FIELD_NUMBER; - hash = (53 * hash) + getMapTypeList().hashCode(); - } - if (getStringListTypeCount() > 0) { - hash = (37 * hash) + STRINGLISTTYPE_FIELD_NUMBER; - hash = (53 * hash) + getStringListTypeList().hashCode(); - } - if (hasMessageType()) { - hash = (37 * hash) + MESSAGETYPE_FIELD_NUMBER; - hash = (53 * hash) + getMessageType().hashCode(); - } - if (getMessageListTypeCount() > 0) { - hash = (37 * hash) + MESSAGELISTTYPE_FIELD_NUMBER; - hash = (53 * hash) + getMessageListTypeList().hashCode(); - } - if (hasEnumType()) { - hash = (37 * hash) + ENUMTYPE_FIELD_NUMBER; - hash = (53 * hash) + enumType_; - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes parseFrom( - java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes parseFrom( - java.nio.ByteBuffer data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -3472,59 +2848,46 @@ public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes parseF } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes parseFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input); + return PARSER.parseDelimitedFrom(input); } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes parseDelimitedFrom( java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseDelimitedWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseDelimitedFrom(input, extensionRegistry); } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes parseFrom( com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input); + return PARSER.parseFrom(input); } public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes parseFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) throws java.io.IOException { - return 
com.google.protobuf.GeneratedMessageV3 - .parseWithIOException(PARSER, input, extensionRegistry); + return PARSER.parseFrom(input, extensionRegistry); } - @java.lang.Override + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } public static Builder newBuilder(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE - ? new Builder() : new Builder().mergeFrom(this); + return newBuilder().mergeFrom(prototype); } + public Builder toBuilder() { return newBuilder(this); } @java.lang.Override protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { Builder builder = new Builder(parent); return builder; } @@ -3532,16 +2895,14 @@ protected Builder newBuilderForType( * Protobuf type {@code AllTypes} */ public static final class Builder extends - com.google.protobuf.GeneratedMessageV3.Builder implements - // @@protoc_insertion_point(builder_implements:AllTypes) - org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypesOrBuilder { + com.google.protobuf.GeneratedMessage.Builder + implements org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypesOrBuilder { public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.internal_static_AllTypes_descriptor; } - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.internal_static_AllTypes_fieldAccessorTable .ensureFieldAccessorsInitialized( @@ -3554,19 +2915,21 @@ private Builder() { } private Builder( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3 - .alwaysUseFieldBuilders) { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { getMapTypeFieldBuilder(); getMessageTypeFieldBuilder(); getMessageListTypeFieldBuilder(); } } - @java.lang.Override + private static Builder create() { + return new Builder(); + } + public Builder clear() { super.clear(); doubleType_ = 0D; @@ -3608,7 +2971,7 @@ public Builder clear() { stringListType_ = com.google.protobuf.LazyStringArrayList.EMPTY; bitField0_ = (bitField0_ & ~0x00010000); if (messageTypeBuilder_ == null) { - messageType_ = null; + messageType_ = org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.getDefaultInstance(); } else { messageTypeBuilder_.clear(); } @@ -3619,23 +2982,24 @@ public Builder clear() { } else { messageListTypeBuilder_.clear(); } - enumType_ = 1; + enumType_ = org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1.VAL1; bitField0_ = (bitField0_ & ~0x00080000); return this; } - @java.lang.Override + public Builder clone() { + return create().mergeFrom(buildPartial()); + } + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.internal_static_AllTypes_descriptor; } - 
@java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes getDefaultInstanceForType() { return org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.getDefaultInstance(); } - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes build() { org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes result = buildPartial(); if (!result.isInitialized()) { @@ -3644,73 +3008,72 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes build() { return result; } - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes buildPartial() { org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes result = new org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes(this); int from_bitField0_ = bitField0_; int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) != 0)) { - result.doubleType_ = doubleType_; + if (((from_bitField0_ & 0x00000001) == 0x00000001)) { to_bitField0_ |= 0x00000001; } - if (((from_bitField0_ & 0x00000002) != 0)) { - result.floatType_ = floatType_; + result.doubleType_ = doubleType_; + if (((from_bitField0_ & 0x00000002) == 0x00000002)) { to_bitField0_ |= 0x00000002; } - if (((from_bitField0_ & 0x00000004) != 0)) { - result.int32Type_ = int32Type_; + result.floatType_ = floatType_; + if (((from_bitField0_ & 0x00000004) == 0x00000004)) { to_bitField0_ |= 0x00000004; } - if (((from_bitField0_ & 0x00000008) != 0)) { - result.int64Type_ = int64Type_; + result.int32Type_ = int32Type_; + if (((from_bitField0_ & 0x00000008) == 0x00000008)) { to_bitField0_ |= 0x00000008; } - if (((from_bitField0_ & 0x00000010) != 0)) { - result.uint32Type_ = uint32Type_; + result.int64Type_ = int64Type_; + if (((from_bitField0_ & 0x00000010) == 0x00000010)) { to_bitField0_ |= 0x00000010; } - if (((from_bitField0_ & 0x00000020) != 0)) { - result.uint64Type_ = uint64Type_; + result.uint32Type_ = uint32Type_; + if (((from_bitField0_ & 0x00000020) == 0x00000020)) { to_bitField0_ |= 0x00000020; } - if (((from_bitField0_ & 0x00000040) != 0)) { - result.sint32Type_ = sint32Type_; + result.uint64Type_ = uint64Type_; + if (((from_bitField0_ & 0x00000040) == 0x00000040)) { to_bitField0_ |= 0x00000040; } - if (((from_bitField0_ & 0x00000080) != 0)) { - result.sint64Type_ = sint64Type_; + result.sint32Type_ = sint32Type_; + if (((from_bitField0_ & 0x00000080) == 0x00000080)) { to_bitField0_ |= 0x00000080; } - if (((from_bitField0_ & 0x00000100) != 0)) { - result.fixed32Type_ = fixed32Type_; + result.sint64Type_ = sint64Type_; + if (((from_bitField0_ & 0x00000100) == 0x00000100)) { to_bitField0_ |= 0x00000100; } - if (((from_bitField0_ & 0x00000200) != 0)) { - result.fixed64Type_ = fixed64Type_; + result.fixed32Type_ = fixed32Type_; + if (((from_bitField0_ & 0x00000200) == 0x00000200)) { to_bitField0_ |= 0x00000200; } - if (((from_bitField0_ & 0x00000400) != 0)) { - result.sfixed32Type_ = sfixed32Type_; + result.fixed64Type_ = fixed64Type_; + if (((from_bitField0_ & 0x00000400) == 0x00000400)) { to_bitField0_ |= 0x00000400; } - if (((from_bitField0_ & 0x00000800) != 0)) { - result.sfixed64Type_ = sfixed64Type_; + result.sfixed32Type_ = sfixed32Type_; + if (((from_bitField0_ & 0x00000800) == 0x00000800)) { to_bitField0_ |= 0x00000800; } - if (((from_bitField0_ & 0x00001000) != 0)) { - result.boolType_ = boolType_; + result.sfixed64Type_ = sfixed64Type_; + if (((from_bitField0_ & 0x00001000) == 0x00001000)) { to_bitField0_ |= 0x00001000; } - if (((from_bitField0_ & 0x00002000) != 0)) { + 
result.boolType_ = boolType_; + if (((from_bitField0_ & 0x00002000) == 0x00002000)) { to_bitField0_ |= 0x00002000; } result.stringType_ = stringType_; - if (((from_bitField0_ & 0x00004000) != 0)) { + if (((from_bitField0_ & 0x00004000) == 0x00004000)) { to_bitField0_ |= 0x00004000; } result.bytesType_ = bytesType_; if (mapTypeBuilder_ == null) { - if (((bitField0_ & 0x00008000) != 0)) { + if (((bitField0_ & 0x00008000) == 0x00008000)) { mapType_ = java.util.Collections.unmodifiableList(mapType_); bitField0_ = (bitField0_ & ~0x00008000); } @@ -3718,21 +3081,22 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes buildPartial( } else { result.mapType_ = mapTypeBuilder_.build(); } - if (((bitField0_ & 0x00010000) != 0)) { - stringListType_ = stringListType_.getUnmodifiableView(); + if (((bitField0_ & 0x00010000) == 0x00010000)) { + stringListType_ = new com.google.protobuf.UnmodifiableLazyStringList( + stringListType_); bitField0_ = (bitField0_ & ~0x00010000); } result.stringListType_ = stringListType_; - if (((from_bitField0_ & 0x00020000) != 0)) { - if (messageTypeBuilder_ == null) { - result.messageType_ = messageType_; - } else { - result.messageType_ = messageTypeBuilder_.build(); - } + if (((from_bitField0_ & 0x00020000) == 0x00020000)) { to_bitField0_ |= 0x00008000; } + if (messageTypeBuilder_ == null) { + result.messageType_ = messageType_; + } else { + result.messageType_ = messageTypeBuilder_.build(); + } if (messageListTypeBuilder_ == null) { - if (((bitField0_ & 0x00040000) != 0)) { + if (((bitField0_ & 0x00040000) == 0x00040000)) { messageListType_ = java.util.Collections.unmodifiableList(messageListType_); bitField0_ = (bitField0_ & ~0x00040000); } @@ -3740,7 +3104,7 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes buildPartial( } else { result.messageListType_ = messageListTypeBuilder_.build(); } - if (((from_bitField0_ & 0x00080000) != 0)) { + if (((from_bitField0_ & 0x00080000) == 0x00080000)) { to_bitField0_ |= 0x00010000; } result.enumType_ = enumType_; @@ -3749,39 +3113,6 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes buildPartial( return result; } - @java.lang.Override - public Builder clone() { - return super.clone(); - } - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.setField(field, value); - } - @java.lang.Override - public Builder clearField( - com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - @java.lang.Override - public Builder clearOneof( - com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - java.lang.Object value) { - return super.addRepeatedField(field, value); - } - @java.lang.Override public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes) { return mergeFrom((org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes)other); @@ -3859,7 +3190,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllT mapType_ = other.mapType_; bitField0_ = (bitField0_ & ~0x00008000); 
mapTypeBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getMapTypeFieldBuilder() : null; } else { mapTypeBuilder_.addAllMessages(other.mapType_); @@ -3898,7 +3229,7 @@ public Builder mergeFrom(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllT messageListType_ = other.messageListType_; bitField0_ = (bitField0_ & ~0x00040000); messageListTypeBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders ? + com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getMessageListTypeFieldBuilder() : null; } else { messageListTypeBuilder_.addAllMessages(other.messageListType_); @@ -3908,17 +3239,14 @@ public Builder mergeFrom(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllT if (other.hasEnumType()) { setEnumType(other.getEnumType()); } - this.mergeUnknownFields(other.unknownFields); - onChanged(); + this.mergeUnknownFields(other.getUnknownFields()); return this; } - @java.lang.Override public final boolean isInitialized() { return true; } - @java.lang.Override public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -3928,7 +3256,7 @@ public Builder mergeFrom( parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); } catch (com.google.protobuf.InvalidProtocolBufferException e) { parsedMessage = (org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes) e.getUnfinishedMessage(); - throw e.unwrapIOException(); + throw e; } finally { if (parsedMessage != null) { mergeFrom(parsedMessage); @@ -3938,27 +3266,22 @@ public Builder mergeFrom( } private int bitField0_; + // optional double doubleType = 1; private double doubleType_ ; /** * optional double doubleType = 1; - * @return Whether the doubleType field is set. */ - @java.lang.Override public boolean hasDoubleType() { - return ((bitField0_ & 0x00000001) != 0); + return ((bitField0_ & 0x00000001) == 0x00000001); } /** * optional double doubleType = 1; - * @return The doubleType. */ - @java.lang.Override public double getDoubleType() { return doubleType_; } /** * optional double doubleType = 1; - * @param value The doubleType to set. - * @return This builder for chaining. */ public Builder setDoubleType(double value) { bitField0_ |= 0x00000001; @@ -3968,7 +3291,6 @@ public Builder setDoubleType(double value) { } /** * optional double doubleType = 1; - * @return This builder for chaining. */ public Builder clearDoubleType() { bitField0_ = (bitField0_ & ~0x00000001); @@ -3977,27 +3299,22 @@ public Builder clearDoubleType() { return this; } + // optional float floatType = 2; private float floatType_ ; /** * optional float floatType = 2; - * @return Whether the floatType field is set. */ - @java.lang.Override public boolean hasFloatType() { - return ((bitField0_ & 0x00000002) != 0); + return ((bitField0_ & 0x00000002) == 0x00000002); } /** * optional float floatType = 2; - * @return The floatType. */ - @java.lang.Override public float getFloatType() { return floatType_; } /** * optional float floatType = 2; - * @param value The floatType to set. - * @return This builder for chaining. */ public Builder setFloatType(float value) { bitField0_ |= 0x00000002; @@ -4007,7 +3324,6 @@ public Builder setFloatType(float value) { } /** * optional float floatType = 2; - * @return This builder for chaining. 
*/ public Builder clearFloatType() { bitField0_ = (bitField0_ & ~0x00000002); @@ -4016,27 +3332,22 @@ public Builder clearFloatType() { return this; } + // optional int32 int32Type = 3; private int int32Type_ ; /** * optional int32 int32Type = 3; - * @return Whether the int32Type field is set. */ - @java.lang.Override public boolean hasInt32Type() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000004) == 0x00000004); } /** * optional int32 int32Type = 3; - * @return The int32Type. */ - @java.lang.Override public int getInt32Type() { return int32Type_; } /** * optional int32 int32Type = 3; - * @param value The int32Type to set. - * @return This builder for chaining. */ public Builder setInt32Type(int value) { bitField0_ |= 0x00000004; @@ -4046,7 +3357,6 @@ public Builder setInt32Type(int value) { } /** * optional int32 int32Type = 3; - * @return This builder for chaining. */ public Builder clearInt32Type() { bitField0_ = (bitField0_ & ~0x00000004); @@ -4055,27 +3365,22 @@ public Builder clearInt32Type() { return this; } + // optional int64 int64Type = 4; private long int64Type_ ; /** * optional int64 int64Type = 4; - * @return Whether the int64Type field is set. */ - @java.lang.Override public boolean hasInt64Type() { - return ((bitField0_ & 0x00000008) != 0); + return ((bitField0_ & 0x00000008) == 0x00000008); } /** * optional int64 int64Type = 4; - * @return The int64Type. */ - @java.lang.Override public long getInt64Type() { return int64Type_; } /** * optional int64 int64Type = 4; - * @param value The int64Type to set. - * @return This builder for chaining. */ public Builder setInt64Type(long value) { bitField0_ |= 0x00000008; @@ -4085,7 +3390,6 @@ public Builder setInt64Type(long value) { } /** * optional int64 int64Type = 4; - * @return This builder for chaining. */ public Builder clearInt64Type() { bitField0_ = (bitField0_ & ~0x00000008); @@ -4094,27 +3398,22 @@ public Builder clearInt64Type() { return this; } + // optional uint32 uint32Type = 5; private int uint32Type_ ; /** * optional uint32 uint32Type = 5; - * @return Whether the uint32Type field is set. */ - @java.lang.Override public boolean hasUint32Type() { - return ((bitField0_ & 0x00000010) != 0); + return ((bitField0_ & 0x00000010) == 0x00000010); } /** * optional uint32 uint32Type = 5; - * @return The uint32Type. */ - @java.lang.Override public int getUint32Type() { return uint32Type_; } /** * optional uint32 uint32Type = 5; - * @param value The uint32Type to set. - * @return This builder for chaining. */ public Builder setUint32Type(int value) { bitField0_ |= 0x00000010; @@ -4124,7 +3423,6 @@ public Builder setUint32Type(int value) { } /** * optional uint32 uint32Type = 5; - * @return This builder for chaining. */ public Builder clearUint32Type() { bitField0_ = (bitField0_ & ~0x00000010); @@ -4133,27 +3431,22 @@ public Builder clearUint32Type() { return this; } + // optional uint64 uint64Type = 6; private long uint64Type_ ; /** * optional uint64 uint64Type = 6; - * @return Whether the uint64Type field is set. */ - @java.lang.Override public boolean hasUint64Type() { - return ((bitField0_ & 0x00000020) != 0); + return ((bitField0_ & 0x00000020) == 0x00000020); } /** * optional uint64 uint64Type = 6; - * @return The uint64Type. */ - @java.lang.Override public long getUint64Type() { return uint64Type_; } /** * optional uint64 uint64Type = 6; - * @param value The uint64Type to set. - * @return This builder for chaining. 
*/ public Builder setUint64Type(long value) { bitField0_ |= 0x00000020; @@ -4163,7 +3456,6 @@ public Builder setUint64Type(long value) { } /** * optional uint64 uint64Type = 6; - * @return This builder for chaining. */ public Builder clearUint64Type() { bitField0_ = (bitField0_ & ~0x00000020); @@ -4172,27 +3464,22 @@ public Builder clearUint64Type() { return this; } + // optional sint32 sint32Type = 7; private int sint32Type_ ; /** * optional sint32 sint32Type = 7; - * @return Whether the sint32Type field is set. */ - @java.lang.Override public boolean hasSint32Type() { - return ((bitField0_ & 0x00000040) != 0); + return ((bitField0_ & 0x00000040) == 0x00000040); } /** * optional sint32 sint32Type = 7; - * @return The sint32Type. */ - @java.lang.Override public int getSint32Type() { return sint32Type_; } /** * optional sint32 sint32Type = 7; - * @param value The sint32Type to set. - * @return This builder for chaining. */ public Builder setSint32Type(int value) { bitField0_ |= 0x00000040; @@ -4202,7 +3489,6 @@ public Builder setSint32Type(int value) { } /** * optional sint32 sint32Type = 7; - * @return This builder for chaining. */ public Builder clearSint32Type() { bitField0_ = (bitField0_ & ~0x00000040); @@ -4211,27 +3497,22 @@ public Builder clearSint32Type() { return this; } + // optional sint64 sint64Type = 8; private long sint64Type_ ; /** * optional sint64 sint64Type = 8; - * @return Whether the sint64Type field is set. */ - @java.lang.Override public boolean hasSint64Type() { - return ((bitField0_ & 0x00000080) != 0); + return ((bitField0_ & 0x00000080) == 0x00000080); } /** * optional sint64 sint64Type = 8; - * @return The sint64Type. */ - @java.lang.Override public long getSint64Type() { return sint64Type_; } /** * optional sint64 sint64Type = 8; - * @param value The sint64Type to set. - * @return This builder for chaining. */ public Builder setSint64Type(long value) { bitField0_ |= 0x00000080; @@ -4241,7 +3522,6 @@ public Builder setSint64Type(long value) { } /** * optional sint64 sint64Type = 8; - * @return This builder for chaining. */ public Builder clearSint64Type() { bitField0_ = (bitField0_ & ~0x00000080); @@ -4250,27 +3530,22 @@ public Builder clearSint64Type() { return this; } + // optional fixed32 fixed32Type = 9; private int fixed32Type_ ; /** * optional fixed32 fixed32Type = 9; - * @return Whether the fixed32Type field is set. */ - @java.lang.Override public boolean hasFixed32Type() { - return ((bitField0_ & 0x00000100) != 0); + return ((bitField0_ & 0x00000100) == 0x00000100); } /** * optional fixed32 fixed32Type = 9; - * @return The fixed32Type. */ - @java.lang.Override public int getFixed32Type() { return fixed32Type_; } /** * optional fixed32 fixed32Type = 9; - * @param value The fixed32Type to set. - * @return This builder for chaining. */ public Builder setFixed32Type(int value) { bitField0_ |= 0x00000100; @@ -4280,7 +3555,6 @@ public Builder setFixed32Type(int value) { } /** * optional fixed32 fixed32Type = 9; - * @return This builder for chaining. */ public Builder clearFixed32Type() { bitField0_ = (bitField0_ & ~0x00000100); @@ -4289,27 +3563,22 @@ public Builder clearFixed32Type() { return this; } + // optional fixed64 fixed64Type = 10; private long fixed64Type_ ; /** * optional fixed64 fixed64Type = 10; - * @return Whether the fixed64Type field is set. 
*/ - @java.lang.Override public boolean hasFixed64Type() { - return ((bitField0_ & 0x00000200) != 0); + return ((bitField0_ & 0x00000200) == 0x00000200); } /** * optional fixed64 fixed64Type = 10; - * @return The fixed64Type. */ - @java.lang.Override public long getFixed64Type() { return fixed64Type_; } /** * optional fixed64 fixed64Type = 10; - * @param value The fixed64Type to set. - * @return This builder for chaining. */ public Builder setFixed64Type(long value) { bitField0_ |= 0x00000200; @@ -4319,7 +3588,6 @@ public Builder setFixed64Type(long value) { } /** * optional fixed64 fixed64Type = 10; - * @return This builder for chaining. */ public Builder clearFixed64Type() { bitField0_ = (bitField0_ & ~0x00000200); @@ -4328,27 +3596,22 @@ public Builder clearFixed64Type() { return this; } + // optional sfixed32 sfixed32Type = 11; private int sfixed32Type_ ; /** * optional sfixed32 sfixed32Type = 11; - * @return Whether the sfixed32Type field is set. */ - @java.lang.Override public boolean hasSfixed32Type() { - return ((bitField0_ & 0x00000400) != 0); + return ((bitField0_ & 0x00000400) == 0x00000400); } /** * optional sfixed32 sfixed32Type = 11; - * @return The sfixed32Type. */ - @java.lang.Override public int getSfixed32Type() { return sfixed32Type_; } /** * optional sfixed32 sfixed32Type = 11; - * @param value The sfixed32Type to set. - * @return This builder for chaining. */ public Builder setSfixed32Type(int value) { bitField0_ |= 0x00000400; @@ -4358,7 +3621,6 @@ public Builder setSfixed32Type(int value) { } /** * optional sfixed32 sfixed32Type = 11; - * @return This builder for chaining. */ public Builder clearSfixed32Type() { bitField0_ = (bitField0_ & ~0x00000400); @@ -4367,27 +3629,22 @@ public Builder clearSfixed32Type() { return this; } + // optional sfixed64 sfixed64Type = 12; private long sfixed64Type_ ; /** * optional sfixed64 sfixed64Type = 12; - * @return Whether the sfixed64Type field is set. */ - @java.lang.Override public boolean hasSfixed64Type() { - return ((bitField0_ & 0x00000800) != 0); + return ((bitField0_ & 0x00000800) == 0x00000800); } /** * optional sfixed64 sfixed64Type = 12; - * @return The sfixed64Type. */ - @java.lang.Override public long getSfixed64Type() { return sfixed64Type_; } /** * optional sfixed64 sfixed64Type = 12; - * @param value The sfixed64Type to set. - * @return This builder for chaining. */ public Builder setSfixed64Type(long value) { bitField0_ |= 0x00000800; @@ -4397,7 +3654,6 @@ public Builder setSfixed64Type(long value) { } /** * optional sfixed64 sfixed64Type = 12; - * @return This builder for chaining. */ public Builder clearSfixed64Type() { bitField0_ = (bitField0_ & ~0x00000800); @@ -4406,27 +3662,22 @@ public Builder clearSfixed64Type() { return this; } + // optional bool boolType = 13; private boolean boolType_ ; /** * optional bool boolType = 13; - * @return Whether the boolType field is set. */ - @java.lang.Override public boolean hasBoolType() { - return ((bitField0_ & 0x00001000) != 0); + return ((bitField0_ & 0x00001000) == 0x00001000); } /** * optional bool boolType = 13; - * @return The boolType. */ - @java.lang.Override public boolean getBoolType() { return boolType_; } /** * optional bool boolType = 13; - * @param value The boolType to set. - * @return This builder for chaining. */ public Builder setBoolType(boolean value) { bitField0_ |= 0x00001000; @@ -4436,7 +3687,6 @@ public Builder setBoolType(boolean value) { } /** * optional bool boolType = 13; - * @return This builder for chaining. 
*/ public Builder clearBoolType() { bitField0_ = (bitField0_ & ~0x00001000); @@ -4445,27 +3695,23 @@ public Builder clearBoolType() { return this; } + // optional string stringType = 14; private java.lang.Object stringType_ = ""; /** * optional string stringType = 14; - * @return Whether the stringType field is set. */ public boolean hasStringType() { - return ((bitField0_ & 0x00002000) != 0); + return ((bitField0_ & 0x00002000) == 0x00002000); } /** * optional string stringType = 14; - * @return The stringType. */ public java.lang.String getStringType() { java.lang.Object ref = stringType_; if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = - (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - if (bs.isValidUtf8()) { - stringType_ = s; - } + java.lang.String s = ((com.google.protobuf.ByteString) ref) + .toStringUtf8(); + stringType_ = s; return s; } else { return (java.lang.String) ref; @@ -4473,7 +3719,6 @@ public java.lang.String getStringType() { } /** * optional string stringType = 14; - * @return The bytes for stringType. */ public com.google.protobuf.ByteString getStringTypeBytes() { @@ -4490,8 +3735,6 @@ public java.lang.String getStringType() { } /** * optional string stringType = 14; - * @param value The stringType to set. - * @return This builder for chaining. */ public Builder setStringType( java.lang.String value) { @@ -4505,7 +3748,6 @@ public Builder setStringType( } /** * optional string stringType = 14; - * @return This builder for chaining. */ public Builder clearStringType() { bitField0_ = (bitField0_ & ~0x00002000); @@ -4515,8 +3757,6 @@ public Builder clearStringType() { } /** * optional string stringType = 14; - * @param value The bytes for stringType to set. - * @return This builder for chaining. */ public Builder setStringTypeBytes( com.google.protobuf.ByteString value) { @@ -4529,27 +3769,22 @@ public Builder setStringTypeBytes( return this; } + // optional bytes bytesType = 15; private com.google.protobuf.ByteString bytesType_ = com.google.protobuf.ByteString.EMPTY; /** * optional bytes bytesType = 15; - * @return Whether the bytesType field is set. */ - @java.lang.Override public boolean hasBytesType() { - return ((bitField0_ & 0x00004000) != 0); + return ((bitField0_ & 0x00004000) == 0x00004000); } /** * optional bytes bytesType = 15; - * @return The bytesType. */ - @java.lang.Override public com.google.protobuf.ByteString getBytesType() { return bytesType_; } /** * optional bytes bytesType = 15; - * @param value The bytesType to set. - * @return This builder for chaining. */ public Builder setBytesType(com.google.protobuf.ByteString value) { if (value == null) { @@ -4562,7 +3797,6 @@ public Builder setBytesType(com.google.protobuf.ByteString value) { } /** * optional bytes bytesType = 15; - * @return This builder for chaining. 
*/ public Builder clearBytesType() { bitField0_ = (bitField0_ & ~0x00004000); @@ -4571,16 +3805,17 @@ public Builder clearBytesType() { return this; } + // repeated .MapFieldEntry mapType = 16; private java.util.List mapType_ = java.util.Collections.emptyList(); private void ensureMapTypeIsMutable() { - if (!((bitField0_ & 0x00008000) != 0)) { + if (!((bitField0_ & 0x00008000) == 0x00008000)) { mapType_ = new java.util.ArrayList(mapType_); bitField0_ |= 0x00008000; } } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.Builder, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder> mapTypeBuilder_; /** @@ -4712,8 +3947,7 @@ public Builder addAllMapType( java.lang.Iterable values) { if (mapTypeBuilder_ == null) { ensureMapTypeIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, mapType_); + super.addAll(values, mapType_); onChanged(); } else { mapTypeBuilder_.addAllMessages(values); @@ -4796,14 +4030,14 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.Builder getMapTypeBuilderList() { return getMapTypeFieldBuilder().getBuilderList(); } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.Builder, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder> getMapTypeFieldBuilder() { if (mapTypeBuilder_ == null) { - mapTypeBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + mapTypeBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.Builder, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntryOrBuilder>( mapType_, - ((bitField0_ & 0x00008000) != 0), + ((bitField0_ & 0x00008000) == 0x00008000), getParentForChildren(), isClean()); mapType_ = null; @@ -4811,40 +4045,35 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.MapFieldEntry.Builder return mapTypeBuilder_; } + // repeated string stringListType = 17; private com.google.protobuf.LazyStringList stringListType_ = com.google.protobuf.LazyStringArrayList.EMPTY; private void ensureStringListTypeIsMutable() { - if (!((bitField0_ & 0x00010000) != 0)) { + if (!((bitField0_ & 0x00010000) == 0x00010000)) { stringListType_ = new com.google.protobuf.LazyStringArrayList(stringListType_); bitField0_ |= 0x00010000; } } /** * repeated string stringListType = 17; - * @return A list containing the stringListType. */ - public com.google.protobuf.ProtocolStringList + public java.util.List getStringListTypeList() { - return stringListType_.getUnmodifiableView(); + return java.util.Collections.unmodifiableList(stringListType_); } /** * repeated string stringListType = 17; - * @return The count of stringListType. */ public int getStringListTypeCount() { return stringListType_.size(); } /** * repeated string stringListType = 17; - * @param index The index of the element to return. - * @return The stringListType at the given index. */ public java.lang.String getStringListType(int index) { return stringListType_.get(index); } /** * repeated string stringListType = 17; - * @param index The index of the value to return. 
- * @return The bytes of the stringListType at the given index. */ public com.google.protobuf.ByteString getStringListTypeBytes(int index) { @@ -4852,9 +4081,6 @@ public java.lang.String getStringListType(int index) { } /** * repeated string stringListType = 17; - * @param index The index to set the value at. - * @param value The stringListType to set. - * @return This builder for chaining. */ public Builder setStringListType( int index, java.lang.String value) { @@ -4868,8 +4094,6 @@ public Builder setStringListType( } /** * repeated string stringListType = 17; - * @param value The stringListType to add. - * @return This builder for chaining. */ public Builder addStringListType( java.lang.String value) { @@ -4883,20 +4107,16 @@ public Builder addStringListType( } /** * repeated string stringListType = 17; - * @param values The stringListType to add. - * @return This builder for chaining. */ public Builder addAllStringListType( java.lang.Iterable values) { ensureStringListTypeIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, stringListType_); + super.addAll(values, stringListType_); onChanged(); return this; } /** * repeated string stringListType = 17; - * @return This builder for chaining. */ public Builder clearStringListType() { stringListType_ = com.google.protobuf.LazyStringArrayList.EMPTY; @@ -4906,8 +4126,6 @@ public Builder clearStringListType() { } /** * repeated string stringListType = 17; - * @param value The bytes of the stringListType to add. - * @return This builder for chaining. */ public Builder addStringListTypeBytes( com.google.protobuf.ByteString value) { @@ -4920,23 +4138,22 @@ public Builder addStringListTypeBytes( return this; } - private org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 messageType_; - private com.google.protobuf.SingleFieldBuilderV3< + // optional .Mesg1 messageType = 18; + private org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 messageType_ = org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.getDefaultInstance(); + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.Builder, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1OrBuilder> messageTypeBuilder_; /** * optional .Mesg1 messageType = 18; - * @return Whether the messageType field is set. */ public boolean hasMessageType() { - return ((bitField0_ & 0x00020000) != 0); + return ((bitField0_ & 0x00020000) == 0x00020000); } /** * optional .Mesg1 messageType = 18; - * @return The messageType. */ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 getMessageType() { if (messageTypeBuilder_ == null) { - return messageType_ == null ? 
org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.getDefaultInstance() : messageType_; + return messageType_; } else { return messageTypeBuilder_.getMessage(); } @@ -4976,8 +4193,7 @@ public Builder setMessageType( */ public Builder mergeMessageType(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1 value) { if (messageTypeBuilder_ == null) { - if (((bitField0_ & 0x00020000) != 0) && - messageType_ != null && + if (((bitField0_ & 0x00020000) == 0x00020000) && messageType_ != org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.getDefaultInstance()) { messageType_ = org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.newBuilder(messageType_).mergeFrom(value).buildPartial(); @@ -4996,7 +4212,7 @@ public Builder mergeMessageType(org.apache.hadoop.hive.ql.io.protobuf.SampleProt */ public Builder clearMessageType() { if (messageTypeBuilder_ == null) { - messageType_ = null; + messageType_ = org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.getDefaultInstance(); onChanged(); } else { messageTypeBuilder_.clear(); @@ -5019,20 +4235,19 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1OrBuilder getMess if (messageTypeBuilder_ != null) { return messageTypeBuilder_.getMessageOrBuilder(); } else { - return messageType_ == null ? - org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.getDefaultInstance() : messageType_; + return messageType_; } } /** * optional .Mesg1 messageType = 18; */ - private com.google.protobuf.SingleFieldBuilderV3< + private com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.Builder, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1OrBuilder> getMessageTypeFieldBuilder() { if (messageTypeBuilder_ == null) { - messageTypeBuilder_ = new com.google.protobuf.SingleFieldBuilderV3< + messageTypeBuilder_ = new com.google.protobuf.SingleFieldBuilder< org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.Builder, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1OrBuilder>( - getMessageType(), + messageType_, getParentForChildren(), isClean()); messageType_ = null; @@ -5040,16 +4255,17 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1OrBuilder getMess return messageTypeBuilder_; } + // repeated .Mesg1 messageListType = 19; private java.util.List messageListType_ = java.util.Collections.emptyList(); private void ensureMessageListTypeIsMutable() { - if (!((bitField0_ & 0x00040000) != 0)) { + if (!((bitField0_ & 0x00040000) == 0x00040000)) { messageListType_ = new java.util.ArrayList(messageListType_); bitField0_ |= 0x00040000; } } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.Builder, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1OrBuilder> messageListTypeBuilder_; /** @@ -5181,8 +4397,7 @@ public Builder addAllMessageListType( java.lang.Iterable values) { if (messageListTypeBuilder_ == null) { ensureMessageListTypeIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll( - values, messageListType_); + super.addAll(values, messageListType_); onChanged(); } else { messageListTypeBuilder_.addAllMessages(values); @@ -5265,14 +4480,14 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.Builder addMessa getMessageListTypeBuilderList() { return 
getMessageListTypeFieldBuilder().getBuilderList(); } - private com.google.protobuf.RepeatedFieldBuilderV3< + private com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.Builder, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1OrBuilder> getMessageListTypeFieldBuilder() { if (messageListTypeBuilder_ == null) { - messageListTypeBuilder_ = new com.google.protobuf.RepeatedFieldBuilderV3< + messageListTypeBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.Builder, org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1OrBuilder>( messageListType_, - ((bitField0_ & 0x00040000) != 0), + ((bitField0_ & 0x00040000) == 0x00040000), getParentForChildren(), isClean()); messageListType_ = null; @@ -5280,122 +4495,74 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.Mesg1.Builder addMessa return messageListTypeBuilder_; } - private int enumType_ = 1; + // optional .AllTypes.Enum1 enumType = 20; + private org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1 enumType_ = org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1.VAL1; /** * optional .AllTypes.Enum1 enumType = 20; - * @return Whether the enumType field is set. */ - @java.lang.Override public boolean hasEnumType() { - return ((bitField0_ & 0x00080000) != 0); + public boolean hasEnumType() { + return ((bitField0_ & 0x00080000) == 0x00080000); } /** * optional .AllTypes.Enum1 enumType = 20; - * @return The enumType. */ - @java.lang.Override public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1 getEnumType() { - @SuppressWarnings("deprecation") - org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1 result = org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1.valueOf(enumType_); - return result == null ? org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1.VAL1 : result; + return enumType_; } /** * optional .AllTypes.Enum1 enumType = 20; - * @param value The enumType to set. - * @return This builder for chaining. */ public Builder setEnumType(org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1 value) { if (value == null) { throw new NullPointerException(); } bitField0_ |= 0x00080000; - enumType_ = value.getNumber(); + enumType_ = value; onChanged(); return this; } /** * optional .AllTypes.Enum1 enumType = 20; - * @return This builder for chaining. 
*/ public Builder clearEnumType() { bitField0_ = (bitField0_ & ~0x00080000); - enumType_ = 1; + enumType_ = org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes.Enum1.VAL1; onChanged(); return this; } - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - // @@protoc_insertion_point(builder_scope:AllTypes) } - // @@protoc_insertion_point(class_scope:AllTypes) - private static final org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes DEFAULT_INSTANCE; static { - DEFAULT_INSTANCE = new org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes(); - } - - public static org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - @java.lang.Deprecated public static final com.google.protobuf.Parser - PARSER = new com.google.protobuf.AbstractParser() { - @java.lang.Override - public AllTypes parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new AllTypes(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes getDefaultInstanceForType() { - return DEFAULT_INSTANCE; + defaultInstance = new AllTypes(true); + defaultInstance.initFields(); } + // @@protoc_insertion_point(class_scope:AllTypes) } - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_MapFieldEntry_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_MapFieldEntry_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_Mesg1_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_Mesg1_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor + private static com.google.protobuf.Descriptors.Descriptor internal_static_AllTypes_descriptor; - private static final - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + private static + com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_AllTypes_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; } - private static com.google.protobuf.Descriptors.FileDescriptor + private static com.google.protobuf.Descriptors.FileDescriptor descriptor; static { java.lang.String[] descriptorData = { @@ -5408,7 +4575,7 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes getDefaultIns "pe\030\004 \001(\003\022\022\n\nuint32Type\030\005 \001(\r\022\022\n\nuint64Ty" + "pe\030\006 \001(\004\022\022\n\nsint32Type\030\007 \001(\021\022\022\n\nsint64Ty" + "pe\030\010 
\001(\022\022\023\n\013fixed32Type\030\t \001(\007\022\023\n\013fixed64" + - "Type\030\n \001(\006\022\024\n\014sfixed32Type\030\013 \001(\017\022\024\n\014sfix" + + "Type\030\n \001(\006\022\024\n\014sfixed32Type\030\013 \001(\017\022\024\n\014sfix", "ed64Type\030\014 \001(\020\022\020\n\010boolType\030\r \001(\010\022\022\n\nstri" + "ngType\030\016 \001(\t\022\021\n\tbytesType\030\017 \001(\014\022\037\n\007mapTy" + "pe\030\020 \003(\0132\016.MapFieldEntry\022\026\n\016stringListTy" + @@ -5418,28 +4585,36 @@ public org.apache.hadoop.hive.ql.io.protobuf.SampleProtos.AllTypes getDefaultIns "L1\020\001\022\010\n\004VAL2\020\002B5\n%org.apache.hadoop.hive" + ".ql.io.protobufB\014SampleProtos" }; - descriptor = com.google.protobuf.Descriptors.FileDescriptor + com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = + new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { + public com.google.protobuf.ExtensionRegistry assignDescriptors( + com.google.protobuf.Descriptors.FileDescriptor root) { + descriptor = root; + internal_static_MapFieldEntry_descriptor = + getDescriptor().getMessageTypes().get(0); + internal_static_MapFieldEntry_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_MapFieldEntry_descriptor, + new java.lang.String[] { "Key", "Value", }); + internal_static_Mesg1_descriptor = + getDescriptor().getMessageTypes().get(1); + internal_static_Mesg1_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_Mesg1_descriptor, + new java.lang.String[] { "AnotherMap", "NoMap", "IntList", }); + internal_static_AllTypes_descriptor = + getDescriptor().getMessageTypes().get(2); + internal_static_AllTypes_fieldAccessorTable = new + com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_AllTypes_descriptor, + new java.lang.String[] { "DoubleType", "FloatType", "Int32Type", "Int64Type", "Uint32Type", "Uint64Type", "Sint32Type", "Sint64Type", "Fixed32Type", "Fixed64Type", "Sfixed32Type", "Sfixed64Type", "BoolType", "StringType", "BytesType", "MapType", "StringListType", "MessageType", "MessageListType", "EnumType", }); + return null; + } + }; + com.google.protobuf.Descriptors.FileDescriptor .internalBuildGeneratedFileFrom(descriptorData, new com.google.protobuf.Descriptors.FileDescriptor[] { - }); - internal_static_MapFieldEntry_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_MapFieldEntry_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_MapFieldEntry_descriptor, - new java.lang.String[] { "Key", "Value", }); - internal_static_Mesg1_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_Mesg1_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_Mesg1_descriptor, - new java.lang.String[] { "AnotherMap", "NoMap", "IntList", }); - internal_static_AllTypes_descriptor = - getDescriptor().getMessageTypes().get(2); - internal_static_AllTypes_fieldAccessorTable = new - com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_AllTypes_descriptor, - new java.lang.String[] { "DoubleType", "FloatType", "Int32Type", "Int64Type", "Uint32Type", "Uint64Type", "Sint32Type", "Sint64Type", "Fixed32Type", "Fixed64Type", "Sfixed32Type", "Sfixed64Type", "BoolType", "StringType", "BytesType", "MapType", "StringListType", "MessageType", "MessageListType", "EnumType", }); + }, 
assigner); } // @@protoc_insertion_point(outer_class_scope) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Context.java b/ql/src/java/org/apache/hadoop/hive/ql/Context.java index 17e4d4ae724b..c75152767707 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Context.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Context.java @@ -26,14 +26,14 @@ import java.util.Collection; import java.util.Date; import java.util.HashMap; -import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Random; -import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicInteger; +import com.google.common.base.Preconditions; import org.antlr.runtime.TokenRewriteStream; import org.apache.commons.compress.utils.Lists; import org.apache.commons.lang3.tuple.Pair; @@ -45,6 +45,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.common.BlobStorageUtils; import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.cleanup.CleanupService; import org.apache.hadoop.hive.ql.cleanup.SyncCleanupService; @@ -63,6 +64,7 @@ import org.apache.hadoop.hive.ql.parse.HiveParser; import org.apache.hadoop.hive.ql.parse.QB; import org.apache.hadoop.hive.ql.plan.LoadTableDesc; +import org.apache.hadoop.hive.ql.plan.Statistics; import org.apache.hadoop.hive.ql.plan.mapper.EmptyStatsSource; import org.apache.hadoop.hive.ql.plan.mapper.PlanMapper; import org.apache.hadoop.hive.ql.plan.mapper.StatsSource; @@ -140,6 +142,7 @@ public class Context { private AtomicInteger sequencer = new AtomicInteger(); private final Map cteTables = new HashMap(); + private final Map cteTableStats = new HashMap<>(); // Keep track of the mapping from load table desc to the output and the lock private final Map loadTableOutputMap = @@ -250,6 +253,21 @@ public String toString() { return prefix; } } + public enum RewritePolicy { + + DEFAULT, + ALL_PARTITIONS; + + public static RewritePolicy fromString(String rewritePolicy) { + Preconditions.checkArgument(null != rewritePolicy, "Invalid rewrite policy: null"); + + try { + return valueOf(rewritePolicy.toUpperCase(Locale.ENGLISH)); + } catch (IllegalArgumentException var2) { + throw new IllegalArgumentException(String.format("Invalid rewrite policy: %s", rewritePolicy), var2); + } + } + } private String getMatchedText(ASTNode n) { return getTokenRewriteStream().toString(n.getTokenStartIndex(), n.getTokenStopIndex() + 1).trim(); } @@ -387,8 +405,8 @@ private Context(Configuration conf, String executionId) { // all external file systems nonLocalScratchPath = new Path(SessionState.getHDFSSessionPath(conf), executionId); localScratchDir = new Path(SessionState.getLocalSessionPath(conf), executionId).toUri().getPath(); - scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION); - stagingDir = HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR); + scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR_PERMISSION); + stagingDir = HiveConf.getVar(conf, HiveConf.ConfVars.STAGING_DIR); opContext = new CompilationOpContext(); viewsTokenRewriteStreams = new HashMap<>(); @@ -1210,8 +1228,14 @@ public Table getMaterializedTable(String cteName) { return cteTables.get(cteName); } - public void addMaterializedTable(String cteName, Table table) { + public void addMaterializedTable(String cteName, Table table, Statistics statistics) { 
cteTables.put(cteName, table); + cteTables.put(table.getFullyQualifiedName(), table); + cteTableStats.put(table.getFullTableName(), statistics); + } + + public Statistics getMaterializedTableStats(TableName tableName) { + return cteTableStats.get(tableName); } public AtomicInteger getSequencer() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/DriverTxnHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/DriverTxnHandler.java index 93b153886c99..89bff9678f1c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/DriverTxnHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/DriverTxnHandler.java @@ -385,7 +385,7 @@ private ValidTxnWriteIdList getTxnWriteIds(String txnString) throws LockExceptio private void setValidWriteIds(ValidTxnWriteIdList txnWriteIds) { driverContext.getConf().set(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY, txnWriteIds.toString()); if (driverContext.getPlan().getFetchTask() != null) { - // This is needed for {@link HiveConf.ConfVars.HIVEFETCHTASKCONVERSION} optimization which initializes JobConf + // This is needed for {@link HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION} optimization which initializes JobConf // in FetchOperator before recordValidTxns() but this has to be done after locks are acquired to avoid race // conditions in ACID. This case is supported only for single source query. Operator source = driverContext.getPlan().getFetchTask().getWork().getSource(); @@ -602,7 +602,7 @@ synchronized void endTransactionAndCleanup(boolean commit, HiveTxnManager txnMan private void commitOrRollback(boolean commit, HiveTxnManager txnManager) throws LockException { if (commit) { if (driverContext.getConf().getBoolVar(ConfVars.HIVE_IN_TEST) && - driverContext.getConf().getBoolVar(ConfVars.HIVETESTMODEROLLBACKTXN)) { + driverContext.getConf().getBoolVar(ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN)) { txnManager.rollbackTxn(); } else { txnManager.commitTxn(); //both commit & rollback clear ALL locks for this transaction diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Executor.java b/ql/src/java/org/apache/hadoop/hive/ql/Executor.java index 1e2140ed8025..708e3870efa4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Executor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Executor.java @@ -297,7 +297,7 @@ private void handleFinished() throws Exception { } private String getJobName() { - int maxlen = driverContext.getConf().getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH); + int maxlen = driverContext.getConf().getIntVar(HiveConf.ConfVars.HIVE_JOBNAME_LENGTH); return Utilities.abbreviate(driverContext.getQueryString(), maxlen - 6); } @@ -322,7 +322,7 @@ private int getJobCount() { private void launchTasks(boolean noName, int jobCount, String jobName) throws HiveException { // Launch upto maxthreads tasks Task task; - int maxthreads = HiveConf.getIntVar(driverContext.getConf(), HiveConf.ConfVars.EXECPARALLETHREADNUMBER); + int maxthreads = HiveConf.getIntVar(driverContext.getConf(), HiveConf.ConfVars.EXEC_PARALLEL_THREAD_NUMBER); while ((task = taskQueue.getRunnable(maxthreads)) != null) { TaskRunner runner = launchTask(task, noName, jobName, jobCount); if (!runner.isRunning()) { @@ -346,7 +346,7 @@ private TaskRunner launchTask(Task task, boolean noName, String jobName, int TaskRunner taskRun = new TaskRunner(task, taskQueue); taskQueue.launching(taskRun); - if (HiveConf.getBoolVar(task.getConf(), HiveConf.ConfVars.EXECPARALLEL) && task.canExecuteInParallel()) { + if (HiveConf.getBoolVar(task.getConf(), HiveConf.ConfVars.EXEC_PARALLEL) && task.canExecuteInParallel()) { 
LOG.info("Starting task [" + task + "] in parallel"); taskRun.start(); } else { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java b/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java index b39037cd65e1..10025dbd9026 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/QueryState.java @@ -98,7 +98,7 @@ private QueryState(HiveConf conf) { // Get the query id stored in query specific config. public String getQueryId() { - return queryConf.getVar(HiveConf.ConfVars.HIVEQUERYID); + return queryConf.getVar(HiveConf.ConfVars.HIVE_QUERY_ID); } public String getQueryString() { @@ -172,15 +172,15 @@ public void setNumModifiedRows(long numModifiedRows) { } public String getQueryTag() { - return HiveConf.getVar(this.queryConf, HiveConf.ConfVars.HIVEQUERYTAG); + return HiveConf.getVar(this.queryConf, HiveConf.ConfVars.HIVE_QUERY_TAG); } public void setQueryTag(String queryTag) { - HiveConf.setVar(this.queryConf, HiveConf.ConfVars.HIVEQUERYTAG, queryTag); + HiveConf.setVar(this.queryConf, HiveConf.ConfVars.HIVE_QUERY_TAG, queryTag); } public static void setApplicationTag(HiveConf queryConf, String queryTag) { - String jobTag = HiveConf.getVar(queryConf, HiveConf.ConfVars.HIVEQUERYTAG); + String jobTag = HiveConf.getVar(queryConf, HiveConf.ConfVars.HIVE_QUERY_TAG); if (jobTag == null || jobTag.isEmpty()) { jobTag = queryTag; } else { @@ -327,13 +327,13 @@ public QueryState build() { // Generate the new queryId if needed if (generateNewQueryId) { String queryId = QueryPlan.makeQueryId(); - queryConf.setVar(HiveConf.ConfVars.HIVEQUERYID, queryId); + queryConf.setVar(HiveConf.ConfVars.HIVE_QUERY_ID, queryId); setApplicationTag(queryConf, queryId); // FIXME: druid storage handler relies on query.id to maintain some staging directories // expose queryid to session level if (hiveConf != null) { - hiveConf.setVar(HiveConf.ConfVars.HIVEQUERYID, queryId); + hiveConf.setVar(HiveConf.ConfVars.HIVE_QUERY_ID, queryId); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/lock/LockDatabaseAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/lock/LockDatabaseAnalyzer.java index fda2282cbfa5..cd0392dd066c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/lock/LockDatabaseAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/database/lock/LockDatabaseAnalyzer.java @@ -52,7 +52,7 @@ public void analyzeInternal(ASTNode root) throws SemanticException { outputs.add(new WriteEntity(getDatabase(databaseName), WriteType.DDL_NO_LOCK)); LockDatabaseDesc desc = - new LockDatabaseDesc(databaseName, mode, HiveConf.getVar(conf, ConfVars.HIVEQUERYID), ctx.getCmd()); + new LockDatabaseDesc(databaseName, mode, HiveConf.getVar(conf, ConfVars.HIVE_QUERY_ID), ctx.getCmd()); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), desc))); ctx.setNeedLockMgr(true); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/msck/MsckOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/msck/MsckOperation.java index 0e0439209f2b..e7e39021d884 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/msck/MsckOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/misc/msck/MsckOperation.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hive.metastore.PartitionManagementTask; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.MetastoreException; import 
org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.ql.ddl.DDLOperation; @@ -51,7 +52,7 @@ public MsckOperation(DDLOperationContext context, MsckDesc desc) { } @Override - public int execute() throws HiveException, IOException, TException { + public int execute() throws HiveException, IOException, TException, MetastoreException { try { Msck msck = new Msck(false, false); msck.init(Msck.getMsckConf(context.getDb().getConf())); @@ -75,9 +76,9 @@ public int execute() throws HiveException, IOException, TException { desc.getFilterExp(), desc.getResFile(), desc.isRepairPartitions(), desc.isAddPartitions(), desc.isDropPartitions(), partitionExpirySeconds); return msck.repair(msckInfo); - } catch (MetaException e) { + } catch (MetaException | MetastoreException e) { LOG.error("Unable to create msck instance.", e); - return 1; + throw e; } catch (SemanticException e) { LOG.error("Msck failed.", e); return 1; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java index 17f9fec4d177..9e7688a5b29a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/AbstractBaseAlterTableAnalyzer.java @@ -105,7 +105,7 @@ protected void addInputsOutputsAlterTable(TableName tableName, Map partitio private void addStatTask(ASTNode root, Table table, Path oldPartitionLocation, Path newPartitionLocation, LoadTableDesc loadTableDesc, Task moveTask) throws SemanticException { // Recalculate the HDFS stats if auto gather stats is set - if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER)) { BasicStatsWork basicStatsWork; if (oldPartitionLocation.equals(newPartitionLocation)) { // If we're merging to the same location, we can avoid some metastore calls diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java index df8d0037845e..28cd5019c1a3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/PartitionUtils.java @@ -62,7 +62,7 @@ private PartitionUtils() { public static void validatePartitions(HiveConf conf, Map partitionSpec) throws SemanticException { Set reservedPartitionValues = new HashSet<>(); // Partition can't have this name - reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME)); + reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.DEFAULT_PARTITION_NAME)); reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.DEFAULT_ZOOKEEPER_PARTITION_NAME)); // Partition value can't end in this suffix reservedPartitionValues.add(HiveConf.getVar(conf, ConfVars.METASTORE_INT_ORIGINAL)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java index b8433d261707..4d73782aaee4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/add/AbstractAddPartitionAnalyzer.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; 
import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.ddl.DDLWork; -import org.apache.hadoop.hive.ql.ddl.DDLDesc.DDLDescWithWriteId; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableAnalyzer; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils; @@ -119,7 +118,7 @@ private List createPartitions(ASTNode private AlterTableAddPartitionDesc.PartitionDesc createPartitionDesc(Table table, String location, Map partitionSpec) { Map params = null; - if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER) && location == null) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER) && location == null) { params = new HashMap(); StatsSetupConst.setStatsStateForCreateTable(params, MetaStoreUtils.getColumnNames(table.getCols()), StatsSetupConst.TRUE); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionOperation.java index 0fd8785d1bc7..24deedf63b0c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/alter/AlterTableAlterPartitionOperation.java @@ -97,7 +97,7 @@ private void checkPartitionValues(Table tbl, int colIndex) throws HiveException try { List values = Warehouse.getPartValuesFromPartName(partName); String value = values.get(colIndex); - if (value.equals(context.getConf().getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME))) { + if (value.equals(context.getConf().getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME))) { continue; } Object convertedValue = converter.convert(value); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java index 40500f13ff4b..c0bffcebdb23 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/partition/show/ShowPartitionAnalyzer.java @@ -119,7 +119,7 @@ ExprNodeDesc getShowPartitionsFilter(Table table, ASTNode command) throws Semant } showFilter = replaceDefaultPartNameAndCastType(target, colTypes, - HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME)); + HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULT_PARTITION_NAME)); } } return showFilter; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AbstractAlterTableArchiveAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AbstractAlterTableArchiveAnalyzer.java index 4b793eb545f0..b419c8332354 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AbstractAlterTableArchiveAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AbstractAlterTableArchiveAnalyzer.java @@ -51,7 +51,7 @@ public AbstractAlterTableArchiveAnalyzer(QueryState queryState) throws SemanticE // the AST tree protected void analyzeCommand(TableName tableName, Map partSpec, ASTNode command) throws SemanticException { - if (!conf.getBoolVar(HiveConf.ConfVars.HIVEARCHIVEENABLED)) { + if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_ARCHIVE_ENABLED)) { throw new SemanticException(ErrorMsg.ARCHIVE_METHODS_DISABLED.getMsg()); } diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AlterTableArchiveOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AlterTableArchiveOperation.java index f54ea6a6a727..e218e590a24e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AlterTableArchiveOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/archive/AlterTableArchiveOperation.java @@ -182,7 +182,7 @@ private Path createArchiveInTmpDir(Table table, PartSpecInfo partitionSpecInfo, context.getConsole().printInfo("Creating " + ARCHIVE_NAME + " for " + originalDir.toString() + " in " + tmpPath); context.getConsole().printInfo("Please wait... (this may take a while)"); try { - int maxJobNameLength = context.getConf().getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH); + int maxJobNameLength = context.getConf().getIntVar(HiveConf.ConfVars.HIVE_JOBNAME_LENGTH); String jobName = String.format("Archiving %s@%s", table.getTableName(), partitionSpecInfo.getName()); jobName = Utilities.abbreviate(jobName, maxJobNameLength - 6); context.getConf().set(MRJobConfig.JOB_NAME, jobName); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactOperation.java index cc896331aff7..8e86056053c8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/compact/AlterTableCompactOperation.java @@ -52,7 +52,7 @@ public AlterTableCompactOperation(DDLOperationContext context, AlterTableCompact @Override public int execute() throws Exception { Table table = context.getDb().getTable(desc.getTableName()); - if (!AcidUtils.isTransactionalTable(table)) { + if (!AcidUtils.isTransactionalTable(table) && !AcidUtils.isNonNativeAcidTable(table)) { throw new HiveException(ErrorMsg.NONACID_COMPACTION_NOT_SUPPORTED, table.getDbName(), table.getTableName()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/concatenate/AlterTableConcatenateAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/concatenate/AlterTableConcatenateAnalyzer.java index 8e4a9d1b6fd1..1322ace7e918 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/concatenate/AlterTableConcatenateAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/table/storage/concatenate/AlterTableConcatenateAnalyzer.java @@ -41,8 +41,6 @@ import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.hooks.ReadEntity; -import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.RCFileInputFormat; import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; @@ -214,7 +212,7 @@ private void addMoveTask(TableName tableName, Table table, Map p private void addStatTask(TableName tableName, Table table, Map partitionSpec, Path oldLocation, Path newLocation, LoadTableDesc loadTableDesc, Task moveTask) throws SemanticException { - if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER)) { BasicStatsWork basicStatsWork; if (oldLocation.equals(newLocation)) { // If we're merging to the same location, we can avoid some metastore calls diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java index c9020b7cbdd8..c0990e8c5680 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rebuild/AlterMaterializedViewRebuildAnalyzer.java @@ -44,22 +44,24 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.HiveRelOptMaterialization; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.metadata.VirtualColumn; -import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories; import org.apache.hadoop.hive.ql.optimizer.calcite.HiveTezModelRelMetadataProvider; import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveInBetweenExpandRule; -import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.ColumnPropagationException; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAggregateInsertDeleteIncrementalRewritingRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAggregateInsertIncrementalRewritingRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAggregatePartitionIncrementalRewritingRule; +import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAugmentMaterializationRule; +import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveAugmentSnapshotMaterializationRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveInsertOnlyScanWriteIdRule; -import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveJoinInsertDeleteIncrementalRewritingRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveJoinInsertIncrementalRewritingRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializationRelMetadataProvider; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewRule; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HivePushdownSnapshotFilterRule; +import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveRowIsDeletedPropagator; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.MaterializedViewRewritingRelVisitor; import org.apache.hadoop.hive.ql.optimizer.calcite.stats.HiveIncrementalRelMdRowCount; import org.apache.hadoop.hive.ql.parse.ASTNode; @@ -69,6 +71,7 @@ import org.apache.hadoop.hive.ql.parse.ParseDriver; import org.apache.hadoop.hive.ql.parse.ParseUtils; import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; +import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.HiveOperation; import org.apache.hadoop.hive.ql.plan.mapper.StatsSource; @@ -85,8 +88,48 @@ import static java.util.Collections.singletonList; /** - * Analyzer for alter materialized view rebuild commands. + * Semantic analyzer for alter materialized view rebuild commands. 
+ * This subclass of {@link SemanticAnalyzer} generates a plan which is derived from the materialized view definition
+ * query plan.
+ * <p>
+ * Steps:
+ * <ul>
+ *   <li>Take the Calcite plan of the materialized view definition query.</li>
+ *   <li>Using the snapshot data in the materialized view metadata, insert a {@link HiveFilter} operator on top of each
+ *   {@link HiveTableScan} operator. The condition has a predicate like ROW_ID.writeid <= high_watermark.
+ *   This step is done by {@link HiveAugmentMaterializationRule} or {@link HiveAugmentSnapshotMaterializationRule}.
+ *   The resulting plan should produce the current result of the materialized view, the one which was created at the
+ *   last rebuild.</li>
+ *   <li>Transform the original view definition query plan using
+ *   Union rewrite or
+ *   Union rewrite with aggregate and
+ *   the augmented plan. The resulting plan has a {@link HiveUnion} operator on top with two branches:
+ *   <ul>
+ *     <li>Scan the materialized view for existing records.</li>
+ *     <li>A plan which is derived from the augmented materialized view definition query plan. This produces the
+ *     newly inserted records.</li>
+ *   </ul>
+ *   </li>
+ *   <li>Transform the plan into an incremental rebuild plan if possible:
+ *   <ul>
+ *     <li>The materialized view definition query has an aggregate and the base tables have insert operations only:
+ *     {@link HiveAggregateInsertIncrementalRewritingRule}</li>
+ *     <li>The materialized view definition query has no aggregate and the base tables have insert operations only:
+ *     {@link HiveJoinInsertIncrementalRewritingRule}</li>
+ *     <li>The materialized view definition query has an aggregate and any base table has delete operations:
+ *     {@link HiveAggregateInsertDeleteIncrementalRewritingRule}</li>
+ *     <li>The materialized view definition query has no aggregate and any base table has delete operations:
+ *     incremental rebuild is not possible, because all records from all source tables would need a unique identifier
+ *     to join them with the corresponding records in the view. ROW__ID cannot be used because its writeId component
+ *     changes on delete, and unique and primary key constraints are not enforced in Hive.</li>
+ *   </ul>
+ *   When any base table has delete operations, the {@link HiveTableScan} operators also fetch the deleted rows,
+ *   and {@link HiveRowIsDeletedPropagator} ensures that extra filter conditions are added to address these.
+ *   </li>
+ * </ul>
*/ + @DDLType(types = HiveParser.TOK_ALTER_MATERIALIZED_VIEW_REBUILD) public class AlterMaterializedViewRebuildAnalyzer extends CalcitePlanner { private static final Logger LOG = LoggerFactory.getLogger(AlterMaterializedViewRebuildAnalyzer.class); @@ -276,7 +319,12 @@ protected RelNode applyMaterializedViewRewriting(RelOptPlanner planner, RelNode } RelNode incrementalRebuildPlan = applyRecordIncrementalRebuildPlan( - basePlan, mdProvider, executorProvider, optCluster, calcitePreMVRewritingPlan, materialization); + basePlan, + mdProvider, + executorProvider, + optCluster, + calcitePreMVRewritingPlan, + materialization); if (mvRebuildMode != MaterializationRebuildMode.INSERT_OVERWRITE_REBUILD) { return incrementalRebuildPlan; @@ -291,18 +339,19 @@ protected RelNode applyMaterializedViewRewriting(RelOptPlanner planner, RelNode } private RelNode applyRecordIncrementalRebuildPlan( - RelNode basePlan, - RelMetadataProvider mdProvider, - RexExecutor executorProvider, - RelOptCluster optCluster, - RelNode calcitePreMVRewritingPlan, - HiveRelOptMaterialization materialization) { + RelNode basePlan, + RelMetadataProvider mdProvider, + RexExecutor executorProvider, + RelOptCluster optCluster, + RelNode calcitePreMVRewritingPlan, + HiveRelOptMaterialization materialization) { // First we need to check if it is valid to convert to MERGE/INSERT INTO. // If we succeed, we modify the plan and afterwards the AST. // MV should be an acid table. boolean acidView = AcidUtils.isFullAcidTable(mvTable.getTTable()) || AcidUtils.isNonNativeAcidTable(mvTable); - MaterializedViewRewritingRelVisitor visitor = new MaterializedViewRewritingRelVisitor(acidView); + MaterializedViewRewritingRelVisitor visitor = + new MaterializedViewRewritingRelVisitor(acidView); visitor.go(basePlan); if (visitor.isRewritingAllowed()) { if (!materialization.isSourceTablesUpdateDeleteModified()) { @@ -313,18 +362,10 @@ private RelNode applyRecordIncrementalRebuildPlan( return applyJoinInsertIncremental(basePlan, mdProvider, executorProvider); } } else { - if (acidView) { - if (visitor.isContainsAggregate()) { - if (visitor.getCountIndex() < 0) { - // count(*) is necessary for determine which rows should be deleted from the view - // if view definition does not have it incremental rebuild can not be performed, bail out - return calcitePreMVRewritingPlan; - } - return applyAggregateInsertDeleteIncremental(basePlan, mdProvider, executorProvider); - } else { - return applyJoinInsertDeleteIncremental( - basePlan, mdProvider, executorProvider, optCluster, calcitePreMVRewritingPlan); - } + // count(*) is necessary for determine which rows should be deleted from the view + // if view definition does not have it incremental rebuild can not be performed + if (acidView && visitor.isContainsAggregate() && visitor.getCountIndex() >= 0) { + return applyAggregateInsertDeleteIncremental(basePlan, mdProvider, executorProvider); } else { return calcitePreMVRewritingPlan; } @@ -373,21 +414,6 @@ private RelNode applyAggregateInsertIncremental( } } - private RelNode applyJoinInsertDeleteIncremental( - RelNode basePlan, RelMetadataProvider mdProvider, RexExecutor executorProvider, RelOptCluster optCluster, - RelNode calcitePreMVRewritingPlan) { - basePlan = applyIncrementalRebuild( - basePlan, mdProvider, executorProvider, HiveJoinInsertDeleteIncrementalRewritingRule.INSTANCE); - mvRebuildMode = MaterializationRebuildMode.JOIN_INSERT_DELETE_REBUILD; - try { - return new HiveJoinInsertDeleteIncrementalRewritingRule.FilterPropagator( - 
HiveRelFactories.HIVE_BUILDER.create(optCluster, null)).propagate(basePlan); - } catch (ColumnPropagationException ex) { - LOG.warn("Exception while propagating column " + VirtualColumn.ROWISDELETED.getName(), ex); - return calcitePreMVRewritingPlan; - } - } - private RelNode applyJoinInsertIncremental( RelNode basePlan, RelMetadataProvider mdProvider, RexExecutor executorProvider) { mvRebuildMode = MaterializationRebuildMode.JOIN_INSERT_REBUILD; @@ -396,9 +422,9 @@ private RelNode applyJoinInsertIncremental( } private RelNode applyPartitionIncrementalRebuildPlan( - RelNode basePlan, RelMetadataProvider mdProvider, RexExecutor executorProvider, - HiveRelOptMaterialization materialization, RelOptCluster optCluster, - RelNode calcitePreMVRewritingPlan) { + RelNode basePlan, RelMetadataProvider mdProvider, RexExecutor executorProvider, + HiveRelOptMaterialization materialization, RelOptCluster optCluster, + RelNode calcitePreMVRewritingPlan) { if (materialization.isSourceTablesUpdateDeleteModified()) { // TODO: Create rewrite rule to transform the plan to partition based incremental rebuild @@ -479,9 +505,6 @@ protected ASTNode fixUpAfterCbo(ASTNode originalAst, ASTNode newAst, CalcitePlan case AGGREGATE_INSERT_DELETE_REBUILD: fixUpASTAggregateInsertDeleteIncrementalRebuild(fixedAST, getMaterializedViewASTBuilder()); return fixedAST; - case JOIN_INSERT_DELETE_REBUILD: - fixUpASTJoinInsertDeleteIncrementalRebuild(fixedAST, getMaterializedViewASTBuilder()); - return fixedAST; default: throw new UnsupportedOperationException("No materialized view rebuild exists for mode " + mvRebuildMode); } @@ -752,89 +775,6 @@ private void fixUpASTJoinInsertIncrementalRebuild(ASTNode newAST) throws Semanti destParent.insertChild(childIndex, newChild); } - private void fixUpASTJoinInsertDeleteIncrementalRebuild(ASTNode newAST, MaterializedViewASTBuilder astBuilder) - throws SemanticException { - // Replace INSERT OVERWRITE by MERGE equivalent rewriting. - // Here we need to do this complex AST rewriting that generates the same plan - // that a MERGE clause would generate because CBO does not support MERGE yet. - // TODO: Support MERGE as first class member in CBO to simplify this logic. 
- // 1) Replace INSERT OVERWRITE by INSERT - ASTNode insertNode = new ASTSearcher().simpleBreadthFirstSearch( - newAST, HiveParser.TOK_QUERY, HiveParser.TOK_INSERT); - ASTNode destinationNode = (ASTNode) insertNode.getChild(0); - ASTNode newInsertInto = (ASTNode) ParseDriver.adaptor.create( - HiveParser.TOK_INSERT_INTO, "TOK_INSERT_INTO"); - newInsertInto.addChildren(destinationNode.getChildren()); - ASTNode destinationParentNode = (ASTNode) destinationNode.getParent(); - int childIndex = destinationNode.childIndex; - destinationParentNode.deleteChild(childIndex); - destinationParentNode.insertChild(childIndex, newInsertInto); - // 1.1) Extract name as we will need it afterwards: - // TOK_DESTINATION TOK_TAB TOK_TABNAME - ASTNode materializationNode = new ASTSearcher().simpleBreadthFirstSearch( - newInsertInto, HiveParser.TOK_INSERT_INTO, HiveParser.TOK_TAB, HiveParser.TOK_TABNAME); - - ASTNode subqueryNodeInputROJ = new ASTSearcher().simpleBreadthFirstSearch( - newAST, HiveParser.TOK_QUERY, HiveParser.TOK_FROM, HiveParser.TOK_RIGHTOUTERJOIN, - HiveParser.TOK_SUBQUERY); - ASTNode selectNodeInputROJ = new ASTSearcher().simpleBreadthFirstSearch( - subqueryNodeInputROJ, HiveParser.TOK_SUBQUERY, HiveParser.TOK_QUERY, - HiveParser.TOK_INSERT, HiveParser.TOK_SELECT); - astBuilder.createAcidSortNodes(TableName.getDbTable( - materializationNode.getChild(0).getText(), - materializationNode.getChild(1).getText())) - .forEach(astNode -> ParseDriver.adaptor.addChild(selectNodeInputROJ, astNode)); - - ASTNode whereClauseInInsert = findWhereClause(insertNode); - - // 2) Add filter condition to Insert - // Modifying filter condition. The incremental rewriting rule generated an OR - // clause where first disjunct contains the condition for the DELETE branch. - // TOK_WHERE - // or - // . <- DISJUNCT FOR - // TOK_TABLE_OR_COL - // $hdt$_0 - // ROW__IS__DELETED - // TOK_FUNCTION <- DISJUNCT FOR - // isnull - // . 
- // TOK_TABLE_OR_COL - // $hdt$_0 - // ROW__IS__DELETED - if (whereClauseInInsert.getChild(0).getType() != HiveParser.KW_OR) { - throw new SemanticException("OR clause expected below TOK_WHERE in incremental rewriting"); - } - // We bypass the OR clause and select the first disjunct - int indexDelete; - int indexInsert; - if (whereClauseInInsert.getChild(0).getChild(0).getType() == HiveParser.DOT) { - indexDelete = 0; - indexInsert = 1; - } else if (whereClauseInInsert.getChild(0).getChild(1).getType() == HiveParser.DOT) { - indexDelete = 1; - indexInsert = 0; - } else { - throw new SemanticException("Unexpected condition in incremental rewriting"); - } - ASTNode newCondInInsert = (ASTNode) whereClauseInInsert.getChild(0).getChild(indexInsert); - ParseDriver.adaptor.setChild(whereClauseInInsert, 0, newCondInInsert); - - ASTNode deletePredicate = (ASTNode) whereClauseInInsert.getChild(0).getChild(indexDelete); - addDeleteBranch(insertNode, subqueryNodeInputROJ, deletePredicate, astBuilder); - - // 3) Add sort node to delete branch - ASTNode sortNode = astBuilder.createSortNodes( - astBuilder.createAcidSortNodes((ASTNode) subqueryNodeInputROJ.getChild(1))); - ParseDriver.adaptor.addChild(insertNode.getParent().getChild(2), sortNode); - - // 4) Now we set some tree properties related to multi-insert - // operation with INSERT/UPDATE - ctx.setOperation(Context.Operation.MERGE); - ctx.addDestNamePrefix(1, Context.DestClausePrefix.INSERT); - ctx.addDestNamePrefix(2, Context.DestClausePrefix.DELETE); - } - @Override protected boolean allowOutputMultipleTimes() { return true; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rewrite/AlterMaterializedViewRewriteOperation.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rewrite/AlterMaterializedViewRewriteOperation.java index 4f2b6cccc6eb..f4ada77ba3c4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rewrite/AlterMaterializedViewRewriteOperation.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/alter/rewrite/AlterMaterializedViewRewriteOperation.java @@ -25,11 +25,15 @@ import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.ddl.DDLOperation; import org.apache.hadoop.hive.ql.ddl.DDLOperationContext; +import org.apache.hadoop.hive.ql.metadata.MaterializationValidationResult; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.parse.CalcitePlanner; import org.apache.hadoop.hive.ql.parse.ParseUtils; +import static org.apache.commons.lang3.StringUtils.isNotBlank; +import static org.apache.hadoop.hive.ql.processors.CompileProcessor.console; + /** * Operation process of enabling/disabling materialized view rewrite. */ @@ -64,9 +68,12 @@ public int execute() throws HiveException { } throw new HiveException(msg); } - if (!planner.isValidAutomaticRewritingMaterialization()) { - throw new HiveException("Cannot enable rewriting for materialized view. 
" + - planner.getInvalidAutomaticRewritingMaterializationReason()); + MaterializationValidationResult validationResult = planner.getMaterializationValidationResult(); + String validationErrorMessage = validationResult.getErrorMessage(); + if (validationResult.getSupportedRewriteAlgorithms().isEmpty()) { + throw new HiveException(validationErrorMessage); + } else if (isNotBlank(validationErrorMessage)) { + console.printError(validationErrorMessage); } } catch (Exception e) { throw new HiveException(e); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/show/ShowMaterializedViewsFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/show/ShowMaterializedViewsFormatter.java index 7423f9067c06..c8c43af74c43 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/show/ShowMaterializedViewsFormatter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ddl/view/materialized/show/ShowMaterializedViewsFormatter.java @@ -41,7 +41,7 @@ import static org.apache.hadoop.hive.conf.Constants.MATERIALIZED_VIEW_REWRITING_TIME_WINDOW; import static org.apache.hadoop.hive.ql.metadata.HiveRelOptMaterialization.IncrementalRebuildMode.UNKNOWN; -import static org.apache.hadoop.hive.ql.metadata.HiveRelOptMaterialization.RewriteAlgorithm.ALL; +import static org.apache.hadoop.hive.ql.metadata.RewriteAlgorithm.ALL; /** * Formats SHOW MATERIALIZED VIEWS results. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/BinaryRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/BinaryRecordReader.java index f9db0175edae..c661d2de4f88 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/BinaryRecordReader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/BinaryRecordReader.java @@ -40,7 +40,7 @@ public class BinaryRecordReader implements RecordReader { public void initialize(InputStream in, Configuration conf, Properties tbl) throws IOException { this.in = in; - maxRecordLength = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVEBINARYRECORDMAX); + maxRecordLength = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_BINARY_RECORD_MAX); } public Writable createRow() throws IOException { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java index d8492a16f175..8b6c8d6b1bd4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsUpdateTask.java @@ -317,7 +317,7 @@ private ColumnStatisticsDesc getColumnStatsDesc(String dbName, private int persistColumnStats(Hive db) throws HiveException, MetaException, IOException { ColumnStatistics colStats = constructColumnStatsFromInput(); SetPartitionsStatsRequest request = - new SetPartitionsStatsRequest(Collections.singletonList(colStats), Constants.HIVE_ENGINE); + new SetPartitionsStatsRequest(Collections.singletonList(colStats)); // Set writeId and validWriteId list for replicated statistics. getColStats() will return // non-null value only during replication. 
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java index e36aaa86cf78..b0b860e16d3f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java @@ -239,7 +239,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { this.hconf = hconf; heartbeatInterval = HiveConf.getIntVar(hconf, - HiveConf.ConfVars.HIVESENDHEARTBEAT); + HiveConf.ConfVars.HIVE_SEND_HEARTBEAT); countAfterReport = 0; totalSz = 0; @@ -296,9 +296,9 @@ protected void initializeOp(Configuration hconf) throws HiveException { dummyObjVectors = new RowContainer[numAliases]; joinEmitInterval = HiveConf.getIntVar(hconf, - HiveConf.ConfVars.HIVEJOINEMITINTERVAL); + HiveConf.ConfVars.HIVE_JOIN_EMIT_INTERVAL); joinCacheSize = HiveConf.getIntVar(hconf, - HiveConf.ConfVars.HIVEJOINCACHESIZE); + HiveConf.ConfVars.HIVE_JOIN_CACHE_SIZE); logEveryNRows = HiveConf.getLongVar(hconf, HiveConf.ConfVars.HIVE_LOG_N_RECORDS); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java index 0044a042bd7b..c677796de73a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/CommonMergeJoinOperator.java @@ -136,13 +136,13 @@ public void initializeOp(Configuration hconf) throws HiveException { int bucketSize; - int oldVar = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEMAPJOINBUCKETCACHESIZE); + int oldVar = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_MAPJOIN_BUCKET_CACHE_SIZE); shortcutUnmatchedRows = HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVE_JOIN_SHORTCUT_UNMATCHED_ROWS); if (oldVar != 100) { bucketSize = oldVar; } else { - bucketSize = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVESMBJOINCACHEROWS); + bucketSize = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_SMBJOIN_CACHE_ROWS); } for (byte pos = 0; pos < order.length; pos++) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java index a8d4a8cbf53d..ea6dff1becfc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java @@ -275,7 +275,7 @@ public JSONObject getJSONPlan(PrintStream out, List> tasks, Task fetc return getJSONPlan( out, tasks, fetchTask, jsonOutput, isExtended, appendTaskType, cboInfo, cboPlan, optimizedSQL, - conf.getVar(ConfVars.HIVESTAGEIDREARRANGE)); + conf.getVar(ConfVars.HIVE_STAGE_ID_REARRANGE)); } public JSONObject getJSONPlan(PrintStream out, List> tasks, Task fetchTask, @@ -950,7 +950,7 @@ private String getRuleExcludedRegex() { JSONObject outputPlan(Object work, PrintStream out, boolean extended, boolean jsonOutput, int indent, String appendToHeader) throws Exception { return outputPlan(work, out, extended, jsonOutput, indent, appendToHeader, - queryState.getConf().getBoolVar(ConfVars.HIVE_IN_TEST)); + queryState != null && queryState.getConf().getBoolVar(ConfVars.HIVE_IN_TEST)); } public JSONObject outputPlan(Object work, PrintStream out, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java index f47cbffca663..e05e36e60be4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java @@ -20,6 +20,7 @@ import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_TEMPORARY_TABLE_STORAGE; import static org.apache.hadoop.hive.ql.security.authorization.HiveCustomStorageHandlerUtils.setWriteOperation; +import static org.apache.hadoop.hive.ql.security.authorization.HiveCustomStorageHandlerUtils.setWriteOperationIsSorted; import java.io.IOException; import java.io.Serializable; @@ -633,6 +634,8 @@ protected void initializeOp(Configuration hconf) throws HiveException { jc = new JobConf(hconf); setWriteOperation(jc, getConf().getTableInfo().getTableName(), getConf().getWriteOperation()); + setWriteOperationIsSorted(jc, getConf().getTableInfo().getTableName(), + dpCtx != null && dpCtx.hasCustomSortExprs()); try { createHiveOutputFormat(jc); @@ -655,7 +658,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { outputClass = serializer.getSerializedClass(); destTablePath = conf.getDestPath(); isInsertOverwrite = conf.getInsertOverwrite(); - counterGroup = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVECOUNTERGROUP); + counterGroup = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_COUNTER_GROUP); LOG.info("Using serializer : " + serializer + " and formatter : " + hiveOutputFormat + (isCompressed ? " with compression" : "")); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java index a3af21a39a99..2fb17e068af5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java @@ -60,9 +60,9 @@ protected void initializeOp(Configuration hconf) throws HiveException { super.initializeOp(hconf); try { heartbeatInterval = HiveConf.getIntVar(hconf, - HiveConf.ConfVars.HIVESENDHEARTBEAT); + HiveConf.ConfVars.HIVE_SEND_HEARTBEAT); conditionEvaluator = ExprNodeEvaluatorFactory.get(conf.getPredicate(), hconf); - if (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEEXPREVALUATIONCACHE)) { + if (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVE_EXPR_EVALUATION_CACHE)) { conditionEvaluator = ExprNodeEvaluatorFactory.toCachedEval(conditionEvaluator); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java index 871980a64464..e97b56b4dca6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java @@ -486,6 +486,9 @@ public final class FunctionRegistry { system.registerGenericUDF("!", GenericUDFOPNot.class); system.registerGenericUDF("between", GenericUDFBetween.class); system.registerGenericUDF("in_bloom_filter", GenericUDFInBloomFilter.class); + system.registerGenericUDF("toMap", GenericUDFToMap.class); + system.registerGenericUDF("toArray", GenericUDFToArray.class); + system.registerGenericUDF("toStruct", GenericUDFToStruct.class); // Utility UDFs system.registerUDF("version", UDFVersion.class, false); @@ -560,7 +563,6 @@ public final class FunctionRegistry { system.registerGenericUDAF("ngrams", new GenericUDAFnGrams()); system.registerGenericUDAF("context_ngrams", new GenericUDAFContextNGrams()); - system.registerGenericUDAF("compute_stats", new GenericUDAFComputeStats()); system.registerGenericUDF("ndv_compute_bit_vector", GenericUDFNDVComputeBitVector.class); system.registerGenericUDAF("compute_bit_vector_hll", new GenericUDAFComputeBitVectorHLL()); 
system.registerGenericUDAF("compute_bit_vector_fm", new GenericUDAFComputeBitVectorFMSketch()); @@ -613,6 +615,7 @@ public final class FunctionRegistry { system.registerGenericUDF("array_intersect", GenericUDFArrayIntersect.class); system.registerGenericUDF("array_union", GenericUDFArrayUnion.class); system.registerGenericUDF("array_remove", GenericUDFArrayRemove.class); + system.registerGenericUDF("array_position", GenericUDFArrayPosition.class); system.registerGenericUDF("deserialize", GenericUDFDeserialize.class); system.registerGenericUDF("sentences", GenericUDFSentences.class); system.registerGenericUDF("map_keys", GenericUDFMapKeys.class); @@ -780,7 +783,17 @@ public final class FunctionRegistry { try { system.registerGenericUDF("iceberg_bucket", - (Class) Class.forName("org.apache.iceberg.mr.hive.GenericUDFIcebergBucket")); + (Class) Class.forName("org.apache.iceberg.mr.hive.udf.GenericUDFIcebergBucket")); + system.registerGenericUDF("iceberg_truncate", + (Class) Class.forName("org.apache.iceberg.mr.hive.udf.GenericUDFIcebergTruncate")); + system.registerGenericUDF("iceberg_year", + (Class) Class.forName("org.apache.iceberg.mr.hive.udf.GenericUDFIcebergYear")); + system.registerGenericUDF("iceberg_month", + (Class) Class.forName("org.apache.iceberg.mr.hive.udf.GenericUDFIcebergMonth")); + system.registerGenericUDF("iceberg_day", + (Class) Class.forName("org.apache.iceberg.mr.hive.udf.GenericUDFIcebergDay")); + system.registerGenericUDF("iceberg_hour", + (Class) Class.forName("org.apache.iceberg.mr.hive.udf.GenericUDFIcebergHour")); } catch (ClassNotFoundException e) { LOG.warn("iceberg_bucket function could not be registered"); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java index f548afd52401..326c351c7382 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java @@ -206,7 +206,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { numRowsHashTbl = 0; heartbeatInterval = HiveConf.getIntVar(hconf, - HiveConf.ConfVars.HIVESENDHEARTBEAT); + HiveConf.ConfVars.HIVE_SEND_HEARTBEAT); countAfterReport = 0; ObjectInspector rowInspector = inputObjInspectors[0]; @@ -367,7 +367,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { keyPositionsSize = new ArrayList(); aggrPositions = new List[aggregations.length]; groupbyMapAggrInterval = HiveConf.getIntVar(hconf, - HiveConf.ConfVars.HIVEGROUPBYMAPINTERVAL); + HiveConf.ConfVars.HIVE_GROUPBY_MAP_INTERVAL); // compare every groupbyMapAggrInterval rows numRowsCompareHashAggr = groupbyMapAggrInterval; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java index ce7279c78ab6..fff5446daef8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/HashTableSinkOperator.java @@ -122,7 +122,7 @@ public HashTableSinkOperator(CompilationOpContext ctx, MapJoinOperator mjop) { @SuppressWarnings("unchecked") protected void initializeOp(Configuration hconf) throws HiveException { super.initializeOp(hconf); - boolean isSilent = HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVESESSIONSILENT); + boolean isSilent = HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVE_SESSION_SILENT); console = new LogHelper(LOG, isSilent); memoryExhaustionChecker = 
MemoryExhaustionCheckerFactory.getChecker(console, hconf, conf); emptyRowContainer.addRow(emptyObjectArray); @@ -172,7 +172,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { } mapJoinTables = new MapJoinPersistableTableContainer[tagLen]; mapJoinTableSerdes = new MapJoinTableContainerSerDe[tagLen]; - hashTableScale = HiveConf.getLongVar(hconf, HiveConf.ConfVars.HIVEHASHTABLESCALE); + hashTableScale = HiveConf.getLongVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_SCALE); if (hashTableScale <= 0) { hashTableScale = 1; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java index 41bee8d60f37..e5b60e7781ff 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java @@ -68,7 +68,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { currCount = 0; isMap = hconf.getBoolean("mapred.task.is.map", true); - String queryId = HiveConf.getVar(getConfiguration(), HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(getConfiguration(), HiveConf.ConfVars.HIVE_QUERY_ID); this.runtimeCache = ObjectCacheFactory.getCache(getConfiguration(), queryId, false, true); // this can happen in HS2 while doing local fetch optimization, where LimitOperator is used @@ -168,14 +168,14 @@ public static String getLimitReachedKey(Configuration conf) { } public static boolean checkLimitReached(JobConf jobConf) { - String queryId = HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVE_QUERY_ID); String limitReachedKey = getLimitReachedKey(jobConf); return checkLimitReached(jobConf, queryId, limitReachedKey); } public static boolean checkLimitReachedForVertex(JobConf jobConf, String vertexName) { - String queryId = HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(jobConf, HiveConf.ConfVars.HIVE_QUERY_ID); return checkLimitReached(jobConf, queryId, vertexName + LIMIT_REACHED_KEY_SUFFIX); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java index 02352c400842..57b1786483ca 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java @@ -177,7 +177,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { // On Tez only: The hash map might already be cached in the container we run // the task in. On MR: The cache is a no-op. - String queryId = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_QUERY_ID); // The cacheKey may have already been defined in the MapJoin conf spec // as part of the Shared Work Optimization if it can be reused among // multiple mapjoin operators. 
In that case, we take that key from conf diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java index 9bc6aa1b41f7..2721977d6f9b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java @@ -36,7 +36,6 @@ import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; -import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.Context.Operation; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.ddl.DDLUtils; @@ -66,7 +65,6 @@ import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.BucketCol; import org.apache.hadoop.hive.ql.optimizer.physical.BucketingSortingCtx.SortCol; import org.apache.hadoop.hive.ql.parse.ExplainConfiguration.AnalyzeState; -import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; import org.apache.hadoop.hive.ql.plan.LoadFileDesc; import org.apache.hadoop.hive.ql.plan.LoadMultiFilesDesc; @@ -834,7 +832,7 @@ private void checkFileFormats(Hive db, LoadTableDesc tbd, Table table) } // handle file format check for table level - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVECHECKFILEFORMAT)) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CHECK_FILEFORMAT)) { boolean flag = true; // work.checkFileFormat is set to true only for Load Task, so assumption here is // dynamic partition context is null diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java index c52ca1877363..5b6248108257 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/PTFPartition.java @@ -75,7 +75,7 @@ protected PTFPartition(Configuration cfg, this.inputOI = inputOI; this.outputOI = outputOI; if ( createElemContainer ) { - int containerNumRows = HiveConf.getIntVar(cfg, ConfVars.HIVEJOINCACHESIZE); + int containerNumRows = HiveConf.getIntVar(cfg, ConfVars.HIVE_JOIN_CACHE_SIZE); elems = new PTFRowContainer>(containerNumRows, cfg, null); elems.setSerDe(serDe, outputOI); elems.setTableDesc(PTFRowContainer.createTableDesc(inputOI)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/PartitionKeySampler.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/PartitionKeySampler.java index 24bec956a69b..bad296e38505 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/PartitionKeySampler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/PartitionKeySampler.java @@ -135,9 +135,9 @@ public void writePartitionKeys(Path path, JobConf job) throws IOException { // random sampling public static FetchOperator createSampler(FetchWork work, JobConf job, Operator operator) throws HiveException { - int sampleNum = HiveConf.getIntVar(job, HiveConf.ConfVars.HIVESAMPLINGNUMBERFORORDERBY); + int sampleNum = HiveConf.getIntVar(job, HiveConf.ConfVars.HIVE_SAMPLING_NUMBER_FOR_ORDERBY); float samplePercent = - HiveConf.getFloatVar(job, HiveConf.ConfVars.HIVESAMPLINGPERCENTFORORDERBY); + HiveConf.getFloatVar(job, HiveConf.ConfVars.HIVE_SAMPLING_PERCENT_FOR_ORDERBY); if (samplePercent < 0.0 || samplePercent > 1.0) { throw new IllegalArgumentException("Percentile value must be within the range of 0 to 1."); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java index d01da9f8f37e..8c8d90f25377 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java @@ -384,8 +384,8 @@ private void addToCurrentFunctions(String functionName, FunctionInfo functionInf public WindowFunctionInfo getWindowFunctionInfo(String functionName) throws SemanticException { // First try without qualifiers - would resolve builtin/temp functions FunctionInfo info = getFunctionInfo(WINDOW_FUNC_PREFIX + functionName); - // Try qualifying with current db name for permanent functions - if (info == null) { + // Try qualifying with current db name for permanent functions and try register function to session + if (info == null && FunctionRegistry.getFunctionInfo(functionName) != null) { String qualifiedName = FunctionUtils.qualifyFunctionName( functionName, SessionState.get().getCurrentDatabase().toLowerCase()); info = getFunctionInfo(WINDOW_FUNC_PREFIX + qualifiedName); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java index b3f167c3e98d..d119e688b710 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SMBMapJoinOperator.java @@ -132,13 +132,13 @@ protected void initializeOp(Configuration hconf) throws HiveException { int bucketSize; // For backwards compatibility reasons we honor the older - // HIVEMAPJOINBUCKETCACHESIZE if set different from default. + // HIVE_MAPJOIN_BUCKET_CACHE_SIZE if set different from default. // By hive 0.13 we should remove this code. - int oldVar = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEMAPJOINBUCKETCACHESIZE); + int oldVar = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_MAPJOIN_BUCKET_CACHE_SIZE); if (oldVar != 100) { bucketSize = oldVar; } else { - bucketSize = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVESMBJOINCACHEROWS); + bucketSize = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_SMBJOIN_CACHE_ROWS); } for (byte pos = 0; pos < order.length; pos++) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java index af22e5e95e88..2dd3e0f59522 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java @@ -149,8 +149,8 @@ boolean blackListed(Configuration conf, String name) { if (blackListedConfEntries == null) { blackListedConfEntries = new HashSet(); if (conf != null) { - String bl = conf.get(HiveConf.ConfVars.HIVESCRIPT_ENV_BLACKLIST.toString(), - HiveConf.ConfVars.HIVESCRIPT_ENV_BLACKLIST.getDefaultValue()); + String bl = conf.get(HiveConf.ConfVars.HIVE_SCRIPT_ENV_BLACKLIST.toString(), + HiveConf.ConfVars.HIVE_SCRIPT_ENV_BLACKLIST.getDefaultValue()); if (bl != null && !bl.isEmpty()) { String[] bls = bl.split(","); Collections.addAll(blackListedConfEntries, bls); @@ -175,7 +175,7 @@ void addJobConfToEnvironment(Configuration conf, Map env) { String value = conf.get(name); // does variable expansion name = safeEnvVarName(name); boolean truncate = conf - .getBoolean(HiveConf.ConfVars.HIVESCRIPTTRUNCATEENV.toString(), false); + .getBoolean(HiveConf.ConfVars.HIVE_SCRIPT_TRUNCATE_ENV.toString(), false); value = safeEnvVarValue(value, name, truncate); env.put(name, value); } @@ -290,12 +290,12 @@ boolean isBrokenPipeException(IOException e) { } boolean 
allowPartialConsumption() { - return HiveConf.getBoolVar(hconf, HiveConf.ConfVars.ALLOWPARTIALCONSUMP); + return HiveConf.getBoolVar(hconf, HiveConf.ConfVars.ALLOW_PARTIAL_CONSUMP); } void displayBrokenPipeInfo() { LOG.info("The script did not consume all input data. This is considered as an error."); - LOG.info("set " + HiveConf.ConfVars.ALLOWPARTIALCONSUMP.toString() + "=true; to ignore it."); + LOG.info("set " + HiveConf.ConfVars.ALLOW_PARTIAL_CONSUMP.toString() + "=true; to ignore it."); return; } @@ -339,13 +339,13 @@ public void process(Object row, int tag) throws HiveException { ProcessBuilder pb = new ProcessBuilder(wrappedCmdArgs); Map env = pb.environment(); addJobConfToEnvironment(hconf, env); - env.put(safeEnvVarName(HiveConf.ConfVars.HIVEALIAS.varname), String + env.put(safeEnvVarName(HiveConf.ConfVars.HIVE_ALIAS.varname), String .valueOf(alias)); // Create an environment variable that uniquely identifies this script // operator String idEnvVarName = HiveConf.getVar(hconf, - HiveConf.ConfVars.HIVESCRIPTIDENVVAR); + HiveConf.ConfVars.HIVE_SCRIPT_ID_ENV_VAR); String idEnvVarVal = getOperatorId(); env.put(safeEnvVarName(idEnvVarName), idEnvVarVal); @@ -376,11 +376,11 @@ public void process(Object row, int tag) throws HiveException { .getProperties()); errThread = new StreamThread(scriptErrReader, new ErrorStreamProcessor( - HiveConf.getIntVar(hconf, HiveConf.ConfVars.SCRIPTERRORLIMIT)), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.SCRIPT_ERROR_LIMIT)), "ErrorProcessor"); if (HiveConf - .getBoolVar(hconf, HiveConf.ConfVars.HIVESCRIPTAUTOPROGRESS)) { + .getBoolVar(hconf, HiveConf.ConfVars.HIVE_SCRIPT_AUTO_PROGRESS)) { autoProgressor = new AutoProgressor(this.getClass().getName(), reporter, Utilities.getDefaultNotificationInterval(hconf), HiveConf.getTimeVar( @@ -574,7 +574,7 @@ class CounterStatusProcessor { private final Reporter reporter; CounterStatusProcessor(Configuration hconf, Reporter reporter){ - this.reporterPrefix = HiveConf.getVar(hconf, HiveConf.ConfVars.STREAMREPORTERPERFIX); + this.reporterPrefix = HiveConf.getVar(hconf, HiveConf.ConfVars.STREAM_REPORTER_PREFIX); this.counterPrefix = reporterPrefix + "counter:"; this.statusPrefix = reporterPrefix + "status:"; this.reporter = reporter; @@ -625,7 +625,7 @@ class ErrorStreamProcessor implements StreamProcessor { public ErrorStreamProcessor(int maxBytes) { this.maxBytes = maxBytes; lastReportTime = 0; - if (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.STREAMREPORTERENABLED)){ + if (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.STREAM_REPORTER_ENABLED)){ counterStatus = new CounterStatusProcessor(hconf, reporter); } } @@ -732,7 +732,7 @@ public void run() { * Wrap the script in a wrapper that allows admins to control. 
*/ protected String[] addWrapper(String[] inArgs) { - String wrapper = HiveConf.getVar(hconf, HiveConf.ConfVars.SCRIPTWRAPPER); + String wrapper = HiveConf.getVar(hconf, HiveConf.ConfVars.SCRIPT_WRAPPER); if (wrapper == null) { return inArgs; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java index ffaa252a25dc..10adb81fe050 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java @@ -66,7 +66,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { assert (colList.get(i) != null); eval[i] = ExprNodeEvaluatorFactory.get(colList.get(i), hconf); } - if (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEEXPREVALUATIONCACHE)) { + if (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVE_EXPR_EVALUATION_CACHE)) { eval = ExprNodeEvaluatorFactory.toCachedEvals(eval); } output = new Object[eval.length]; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java index 843686bb30ad..5b1a7a7c2a1b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java @@ -320,7 +320,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { jc = new JobConf(hconf); } - defaultPartitionName = HiveConf.getVar(hconf, HiveConf.ConfVars.DEFAULTPARTITIONNAME); + defaultPartitionName = HiveConf.getVar(hconf, HiveConf.ConfVars.DEFAULT_PARTITION_NAME); currentStat = null; stats = new HashMap(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java index b227a70a147c..df43e9608900 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordReader.java @@ -60,7 +60,7 @@ public int next(Writable row) throws IOException { int bytesConsumed = lineReader.readLine((Text) row); - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESCRIPTESCAPE)) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SCRIPT_ESCAPE)) { return HiveUtils.unescapeText((Text) row); } return bytesConsumed; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java index acbf46678f6a..99c03fa2bd58 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/TextRecordWriter.java @@ -46,7 +46,7 @@ public void write(Writable row) throws IOException { Text text = (Text) row; Text escapeText = text; - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESCRIPTESCAPE)) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SCRIPT_ESCAPE)) { escapeText = HiveUtils.escapeText(text); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java index c58ed4e564d8..506266c829a7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java @@ -94,7 +94,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { // Set up periodic progress reporting in case the UDTF doesn't output rows // for a while - if (HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEUDTFAUTOPROGRESS)) { + if 
(HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVE_UDTF_AUTO_PROGRESS)) { autoProgressor = new AutoProgressor(this.getClass().getName(), reporter, Utilities.getDefaultNotificationInterval(hconf), HiveConf.getTimeVar( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java index 470d052d8982..b247e276cb19 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java @@ -2939,12 +2939,12 @@ public static Map getFullDPSpecs(Configuration conf, Dyn private static void validateDynPartitionCount(Configuration conf, Collection partitions) throws HiveException { int partsToLoad = partitions.size(); - int maxPartition = HiveConf.getIntVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS); + int maxPartition = HiveConf.getIntVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS); if (partsToLoad > maxPartition) { throw new HiveException("Number of dynamic partitions created is " + partsToLoad + ", which is more than " + maxPartition - +". To solve this try to set " + HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS.varname + +". To solve this try to set " + HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS.varname + " to at least " + partsToLoad + '.'); } } @@ -3356,8 +3356,8 @@ public static String formatMsecToStr(long msec) { */ public static int estimateNumberOfReducers(HiveConf conf, ContentSummary inputSummary, MapWork work, boolean finalMapRed) throws IOException { - long bytesPerReducer = conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER); - int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS); + long bytesPerReducer = conf.getLongVar(HiveConf.ConfVars.BYTES_PER_REDUCER); + int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAX_REDUCERS); double samplePercentage = getHighestSamplePercentage(work); long totalInputFileSize = getTotalInputFileSize(inputSummary, work, samplePercentage); @@ -3809,7 +3809,7 @@ public static void setInputPaths(JobConf job, List pathsToAdd) { */ public static void setInputAttributes(Configuration conf, MapWork mWork) { HiveConf.ConfVars var = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez") ? 
- HiveConf.ConfVars.HIVETEZINPUTFORMAT : HiveConf.ConfVars.HIVEINPUTFORMAT; + HiveConf.ConfVars.HIVE_TEZ_INPUT_FORMAT : HiveConf.ConfVars.HIVE_INPUT_FORMAT; if (mWork.getInputformat() != null) { HiveConf.setVar(conf, var, mWork.getInputformat()); } @@ -4199,7 +4199,7 @@ public static int getDPColOffset(FileSinkDesc conf) { public static List getStatsTmpDirs(BaseWork work, Configuration conf) { List statsTmpDirs = new ArrayList<>(); - if (!StatsSetupConst.StatDB.fs.name().equalsIgnoreCase(HiveConf.getVar(conf, ConfVars.HIVESTATSDBCLASS))) { + if (!StatsSetupConst.StatDB.fs.name().equalsIgnoreCase(HiveConf.getVar(conf, ConfVars.HIVE_STATS_DBCLASS))) { // no-op for non-fs stats collection return statsTmpDirs; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/MapAggrMemErrorHeuristic.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/MapAggrMemErrorHeuristic.java index aa6452e49103..da5bd837e0da 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/MapAggrMemErrorHeuristic.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/errors/MapAggrMemErrorHeuristic.java @@ -46,7 +46,7 @@ public MapAggrMemErrorHeuristic() { @Override public void init(String query, JobConf conf) { super.init(query, conf); - configMatches = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE); + configMatches = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_MAPSIDE_AGGREGATE); } @Override @@ -56,9 +56,9 @@ public ErrorAndSolution getErrorAndSolution() { List matchingLines = getRegexToLogLines().get(OUT_OF_MEMORY_REGEX); if (matchingLines.size() > 0) { - String confName = HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY.toString(); + String confName = HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY.toString(); float confValue = HiveConf.getFloatVar(getConf(), - HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); es = new ErrorAndSolution( "Out of memory due to hash maps used in map-side aggregation.", diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java index ce12cea66eda..e19b73dabcdf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java @@ -208,7 +208,7 @@ public ExecDriver(MapredWork plan, JobConf job, boolean isSilent) throws HiveExc @Override public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) { Counters.Counter cntr = ctrs.findCounter( - HiveConf.getVar(job, HiveConf.ConfVars.HIVECOUNTERGROUP), + HiveConf.getVar(job, HiveConf.ConfVars.HIVE_COUNTER_GROUP), Operator.HIVE_COUNTER_FATAL); return cntr != null && cntr.getValue() > 0; } @@ -264,7 +264,7 @@ public int execute() { job.setMapOutputValueClass(BytesWritable.class); try { - String partitioner = HiveConf.getVar(job, ConfVars.HIVEPARTITIONER); + String partitioner = HiveConf.getVar(job, ConfVars.HIVE_PARTITIONER); job.setPartitionerClass(JavaUtils.loadClass(partitioner)); } catch (ClassNotFoundException e) { throw new RuntimeException(e.getMessage(), e); @@ -282,7 +282,7 @@ public int execute() { job.setBoolean(MRJobConfig.REDUCE_SPECULATIVE, false); job.setBoolean(MRJobConfig.MAP_SPECULATIVE, false); - String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT); + String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVE_INPUT_FORMAT); if (mWork.isUseBucketizedHiveInputFormat()) { inpFormat = BucketizedHiveInputFormat.class.getName(); @@ -495,19 +495,19 @@ public static void 
propagateSplitSettings(JobConf job, MapWork work) { } if (work.getMaxSplitSize() != null) { - HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, work.getMaxSplitSize()); + HiveConf.setLongVar(job, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, work.getMaxSplitSize()); } if (work.getMinSplitSize() != null) { - HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, work.getMinSplitSize()); + HiveConf.setLongVar(job, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, work.getMinSplitSize()); } if (work.getMinSplitSizePerNode() != null) { - HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZEPERNODE, work.getMinSplitSizePerNode()); + HiveConf.setLongVar(job, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE_PER_NODE, work.getMinSplitSizePerNode()); } if (work.getMinSplitSizePerRack() != null) { - HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZEPERRACK, work.getMinSplitSizePerRack()); + HiveConf.setLongVar(job, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE_PER_RACK, work.getMinSplitSizePerRack()); } } @@ -572,7 +572,7 @@ private void handleSampling(Context context, MapWork mWork, JobConf job) protected void setInputAttributes(Configuration conf) { MapWork mWork = work.getMapWork(); if (mWork.getInputformat() != null) { - HiveConf.setVar(conf, ConfVars.HIVEINPUTFORMAT, mWork.getInputformat()); + HiveConf.setVar(conf, ConfVars.HIVE_INPUT_FORMAT, mWork.getInputformat()); } // Intentionally overwrites anything the user may have put here conf.setBoolean("hive.input.format.sorted", mWork.isInputFormatSorted()); @@ -692,14 +692,14 @@ public static void main(String[] args) throws IOException, HiveException { } } - boolean isSilent = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESESSIONSILENT); + boolean isSilent = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SESSION_SILENT); - String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID, "").trim(); + String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID, "").trim(); if(queryId.isEmpty()) { queryId = "unknown-" + System.currentTimeMillis(); - HiveConf.setVar(conf, HiveConf.ConfVars.HIVEQUERYID, queryId); + HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID, queryId); } - System.setProperty(HiveConf.ConfVars.HIVEQUERYID.toString(), queryId); + System.setProperty(HiveConf.ConfVars.HIVE_QUERY_ID.toString(), queryId); LogUtils.registerLoggingContext(conf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java index a5beb633bcb9..f7a658ea9242 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java @@ -236,7 +236,7 @@ public static class ReportStats implements Operator.OperatorFunc { public ReportStats(Reporter rp, Configuration conf) { this.rp = rp; - this.groupName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP); + this.groupName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_COUNTER_GROUP); } @Override diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java index c365d41a8204..62d6e40d02c1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/HadoopJobExecHelper.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hive.ql.exec.mr; import java.io.IOException; -import java.io.Serializable; import java.text.SimpleDateFormat; import java.util.ArrayList; import 
java.util.Calendar; @@ -143,7 +142,7 @@ public void setJobId(JobID jobId) { public HadoopJobExecHelper(JobConf job, LogHelper console, Task task, HadoopJobExecHook hookCallBack) { - this.queryId = HiveConf.getVar(job, HiveConf.ConfVars.HIVEQUERYID, "unknown-" + System.currentTimeMillis()); + this.queryId = HiveConf.getVar(job, HiveConf.ConfVars.HIVE_QUERY_ID, "unknown-" + System.currentTimeMillis()); this.job = job; this.console = console; this.task = task; @@ -205,10 +204,10 @@ public boolean checkFatalErrors(Counters ctrs, StringBuilder errMsg) { return false; } // check for number of created files - Counters.Counter cntr = ctrs.findCounter(HiveConf.getVar(job, ConfVars.HIVECOUNTERGROUP), + Counters.Counter cntr = ctrs.findCounter(HiveConf.getVar(job, ConfVars.HIVE_COUNTER_GROUP), Operator.HIVE_COUNTER_CREATED_FILES); long numFiles = cntr != null ? cntr.getValue() : 0; - long upperLimit = HiveConf.getLongVar(job, HiveConf.ConfVars.MAXCREATEDFILES); + long upperLimit = HiveConf.getLongVar(job, HiveConf.ConfVars.MAX_CREATED_FILES); if (numFiles > upperLimit) { errMsg.append("total number of created files now is " + numFiles + ", which exceeds ").append(upperLimit); return true; @@ -226,7 +225,7 @@ private MapRedStats progress(ExecDriverTaskHandle th) throws IOException, LockEx job, HiveConf.ConfVars.HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL, TimeUnit.MILLISECONDS); boolean fatal = false; StringBuilder errMsg = new StringBuilder(); - long pullInterval = HiveConf.getLongVar(job, HiveConf.ConfVars.HIVECOUNTERSPULLINTERVAL); + long pullInterval = HiveConf.getLongVar(job, HiveConf.ConfVars.HIVE_COUNTERS_PULL_INTERVAL); boolean initializing = true; boolean initOutputPrinted = false; long cpuMsec = -1; @@ -437,7 +436,7 @@ private MapRedStats progress(ExecDriverTaskHandle th) throws IOException, LockEx //Set the number of table rows affected in mapRedStats to display number of rows inserted. 
if (ctrs != null) { Counter counter = ctrs.findCounter( - ss.getConf().getVar(HiveConf.ConfVars.HIVECOUNTERGROUP), + ss.getConf().getVar(HiveConf.ConfVars.HIVE_COUNTER_GROUP), FileSinkOperator.TOTAL_TABLE_ROWS_WRITTEN); if (counter != null) { mapRedStats.setNumModifiedRows(counter.getValue()); @@ -474,7 +473,7 @@ public void jobInfo(RunningJob rj) { } console.printInfo(getJobStartMsg(rj.getID()) + ", Tracking URL = " + rj.getTrackingURL()); - console.printInfo("Kill Command = " + HiveConf.getVar(job, ConfVars.MAPREDBIN) + console.printInfo("Kill Command = " + HiveConf.getVar(job, ConfVars.MAPRED_BIN) + " job -kill " + rj.getID()); } } @@ -557,14 +556,14 @@ public int progress(RunningJob rj, JobClient jc, Context ctx) throws IOException // remove the pwd from conf file so that job tracker doesn't show this // logs - String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD); + String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTORE_PWD); if (pwd != null) { - HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE"); + HiveConf.setVar(job, HiveConf.ConfVars.METASTORE_PWD, "HIVE"); } // replace it back if (pwd != null) { - HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, pwd); + HiveConf.setVar(job, HiveConf.ConfVars.METASTORE_PWD, pwd); } // add to list of running jobs to kill in case of abnormal shutdown @@ -654,7 +653,7 @@ private Map extractAllCounterValues(Counters counters) { private List getClientStatPublishers() { List clientStatsPublishers = new ArrayList(); - String confString = HiveConf.getVar(job, HiveConf.ConfVars.CLIENTSTATSPUBLISHERS); + String confString = HiveConf.getVar(job, HiveConf.ConfVars.CLIENT_STATS_PUBLISHERS); confString = confString.trim(); if (confString.equals("")) { return clientStatsPublishers; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java index 386358493650..6c8b9ff00d7c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapRedTask.java @@ -107,7 +107,7 @@ public int execute() { // auto-determine local mode if allowed if (!ctx.isLocalOnlyExecutionMode() && - conf.getBoolVar(HiveConf.ConfVars.LOCALMODEAUTO)) { + conf.getBoolVar(HiveConf.ConfVars.LOCAL_MODE_AUTO)) { if (inputSummary == null) { inputSummary = Utilities.getInputSummary(ctx, work.getMapWork(), null); @@ -142,7 +142,7 @@ public int execute() { } } - runningViaChild = conf.getBoolVar(HiveConf.ConfVars.SUBMITVIACHILD); + runningViaChild = conf.getBoolVar(HiveConf.ConfVars.SUBMIT_VIA_CHILD); if (!runningViaChild) { // since we are running the mapred task in the same jvm, we should update the job conf @@ -172,7 +172,7 @@ public int execute() { super.setInputAttributes(conf); // enable assertion - String hadoopExec = conf.getVar(HiveConf.ConfVars.HADOOPBIN); + String hadoopExec = conf.getVar(HiveConf.ConfVars.HADOOP_BIN); String hiveJar = conf.getJar(); String libJars = super.getResource(conf, ResourceType.JAR); @@ -247,7 +247,7 @@ public int execute() { // if we are running in local mode - then the amount of memory used // by the child jvm can no longer default to the memory used by the // parent jvm - int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM); + int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVE_HADOOP_MAX_MEM); if (hadoopMem == 0) { // remove env var that would default child jvm to use parent's memory // as default. 
child jvm would use default memory for a hadoop client @@ -453,13 +453,13 @@ private void setNumberOfReducers() throws IOException { } console .printInfo("In order to change the average load for a reducer (in bytes):"); - console.printInfo(" set " + HiveConf.ConfVars.BYTESPERREDUCER.varname + console.printInfo(" set " + HiveConf.ConfVars.BYTES_PER_REDUCER.varname + "="); console.printInfo("In order to limit the maximum number of reducers:"); - console.printInfo(" set " + HiveConf.ConfVars.MAXREDUCERS.varname + console.printInfo(" set " + HiveConf.ConfVars.MAX_REDUCERS.varname + "="); console.printInfo("In order to set a constant number of reducers:"); - console.printInfo(" set " + HiveConf.ConfVars.HADOOPNUMREDUCERS + console.printInfo(" set " + HiveConf.ConfVars.HADOOP_NUM_REDUCERS + "="); } } @@ -478,13 +478,13 @@ public static String isEligibleForLocalMode(HiveConf conf, long inputLength, long inputFileCount) { - long maxBytes = conf.getLongVar(HiveConf.ConfVars.LOCALMODEMAXBYTES); - long maxInputFiles = conf.getIntVar(HiveConf.ConfVars.LOCALMODEMAXINPUTFILES); + long maxBytes = conf.getLongVar(HiveConf.ConfVars.LOCAL_MODE_MAX_BYTES); + long maxInputFiles = conf.getIntVar(HiveConf.ConfVars.LOCAL_MODE_MAX_INPUT_FILES); // check for max input size if (inputLength > maxBytes) { return "Input Size (= " + inputLength + ") is larger than " + - HiveConf.ConfVars.LOCALMODEMAXBYTES.varname + " (= " + maxBytes + ")"; + HiveConf.ConfVars.LOCAL_MODE_MAX_BYTES.varname + " (= " + maxBytes + ")"; } // ideally we would like to do this check based on the number of splits @@ -494,7 +494,7 @@ public static String isEligibleForLocalMode(HiveConf conf, if (inputFileCount > maxInputFiles) { return "Number of Input Files (= " + inputFileCount + ") is larger than " + - HiveConf.ConfVars.LOCALMODEMAXINPUTFILES.varname + "(= " + maxInputFiles + ")"; + HiveConf.ConfVars.LOCAL_MODE_MAX_INPUT_FILES.varname + "(= " + maxInputFiles + ")"; } // since local mode only runs with 1 reducers - make sure that the diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java index 62b74dc8425d..0a781a825809 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/MapredLocalTask.java @@ -152,7 +152,7 @@ public boolean requireLock() { @Override public int execute() { - if (conf.getBoolVar(HiveConf.ConfVars.SUBMITLOCALTASKVIACHILD)) { + if (conf.getBoolVar(HiveConf.ConfVars.SUBMIT_LOCAL_TASK_VIA_CHILD)) { // send task off to another jvm return executeInChildVM(); } else { @@ -167,8 +167,8 @@ private int executeInChildVM() { // generate the cmd line to run in the child jvm String hiveJar = conf.getJar(); - String hadoopExec = conf.getVar(HiveConf.ConfVars.HADOOPBIN); - conf.setVar(ConfVars.HIVEADDEDJARS, Utilities.getResourceFiles(conf, SessionState.ResourceType.JAR)); + String hadoopExec = conf.getVar(HiveConf.ConfVars.HADOOP_BIN); + conf.setVar(ConfVars.HIVE_ADDED_JARS, Utilities.getResourceFiles(conf, SessionState.ResourceType.JAR)); // write out the plan to a local file Path planPath = new Path(context.getLocalTmpPath(), "plan.xml"); MapredLocalWork plan = getWork(); @@ -235,8 +235,8 @@ private int executeInChildVM() { // if we are running in local mode - then the amount of memory used // by the child jvm can no longer default to the memory used by the // parent jvm - // int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM); - int hadoopMem = 
conf.getIntVar(HiveConf.ConfVars.HIVEHADOOPMAXMEM); + // int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVE_HADOOP_MAX_MEM); + int hadoopMem = conf.getIntVar(HiveConf.ConfVars.HIVE_HADOOP_MAX_MEM); if (hadoopMem == 0) { // remove env var that would default child jvm to use parent's memory // as default. child jvm would use default memory for a hadoop client diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java index 92a3df4fc8a7..9e116e0e243f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/BytesBytesMultiHashMap.java @@ -624,8 +624,8 @@ private static void validateCapacity(long capacity) { if (capacity > Integer.MAX_VALUE) { throw new RuntimeException("Attempting to expand the hash table to " + capacity + " that overflows maximum array size. For this query, you may want to disable " - + ConfVars.HIVEDYNAMICPARTITIONHASHJOIN.varname + " or reduce " - + ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD.varname); + + ConfVars.HIVE_DYNAMIC_PARTITION_HASHJOIN.varname + " or reduce " + + ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD.varname); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java index ae84d2d6f06d..452ef5991e86 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HashMapWrapper.java @@ -32,7 +32,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator; import org.apache.hadoop.hive.ql.exec.JoinUtil; -import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.NonMatchedSmallTableIterator; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; import org.apache.hadoop.hive.ql.exec.vector.wrapper.VectorHashKeyWrapperBase; import org.apache.hadoop.hive.ql.exec.vector.wrapper.VectorHashKeyWrapperBatch; @@ -72,15 +71,15 @@ public HashMapWrapper(Map metaData) { } public HashMapWrapper() { - this(HiveConf.ConfVars.HIVEHASHTABLEKEYCOUNTADJUSTMENT.defaultFloatVal, - HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD.defaultIntVal, - HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR.defaultFloatVal, -1); + this(HiveConf.ConfVars.HIVE_HASHTABLE_KEY_COUNT_ADJUSTMENT.defaultFloatVal, + HiveConf.ConfVars.HIVE_HASHTABLE_THRESHOLD.defaultIntVal, + HiveConf.ConfVars.HIVE_HASHTABLE_LOAD_FACTOR.defaultFloatVal, -1); } public HashMapWrapper(Configuration hconf, long keyCount) { - this(HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEKEYCOUNTADJUSTMENT), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD), - HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR), keyCount); + this(HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_KEY_COUNT_ADJUSTMENT), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_THRESHOLD), + HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_LOAD_FACTOR), keyCount); } private HashMapWrapper(float keyCountAdj, int threshold, float loadFactor, long keyCount) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java index e66977f758a8..d82c43df84c7 100644 
--- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/HybridHashTableContainer.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hive.ql.exec.JoinUtil.JoinResult; import org.apache.hadoop.hive.ql.exec.SerializationUtilities; import org.apache.hadoop.hive.ql.exec.persistence.MapJoinBytesTableContainer.KeyValueHelper; -import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.NonMatchedSmallTableIterator; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; import org.apache.hadoop.hive.ql.exec.vector.rowbytescontainer.VectorRowBytesContainer; import org.apache.hadoop.hive.ql.exec.vector.wrapper.VectorHashKeyWrapperBase; @@ -279,15 +278,15 @@ public int size() { public HybridHashTableContainer(Configuration hconf, long keyCount, long memoryAvailable, long estimatedTableSize, HybridHashTableConf nwayConf) throws SerDeException, IOException { - this(HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEKEYCOUNTADJUSTMENT), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD), - HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINWBSIZE), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEWBSIZE), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS), - HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT), - HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINBLOOMFILTER), + this(HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_KEY_COUNT_ADJUSTMENT), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_THRESHOLD), + HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_LOAD_FACTOR), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_MEMCHECK_FREQ), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_MIN_WB_SIZE), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_WB_SIZE), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_MIN_NUM_PARTITIONS), + HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_MAPJOIN_OPTIMIZED_TABLE_PROBE_PERCENT), + HiveConf.getBoolVar(hconf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_BLOOMFILTER), estimatedTableSize, keyCount, memoryAvailable, nwayConf, HiveUtils.getLocalDirList(hconf)); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java index 79695975ef26..ac3570900775 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinBytesTableContainer.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.ExprNodeEvaluator; import org.apache.hadoop.hive.ql.exec.JoinUtil; -import org.apache.hadoop.hive.ql.exec.persistence.MapJoinTableContainer.NonMatchedSmallTableIterator; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpressionWriter; import org.apache.hadoop.hive.ql.exec.vector.wrapper.VectorHashKeyWrapperBase; import org.apache.hadoop.hive.ql.exec.vector.wrapper.VectorHashKeyWrapperBatch; @@ -104,10 +103,10 @@ public class MapJoinBytesTableContainer 
public MapJoinBytesTableContainer(Configuration hconf, MapJoinObjectSerDeContext valCtx, long keyCount, long memUsage) throws SerDeException { - this(HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEKEYCOUNTADJUSTMENT), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD), - HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEWBSIZE), + this(HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_KEY_COUNT_ADJUSTMENT), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_THRESHOLD), + HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_LOAD_FACTOR), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_WB_SIZE), valCtx, keyCount, memUsage); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java index 8ee54fe8a1d8..4100bbccab5d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplDumpTask.java @@ -252,7 +252,7 @@ public int execute() { DumpMetaData dmd = new DumpMetaData(hiveDumpRoot, conf); // Initialize ReplChangeManager instance since we will require it to encode file URI. ReplChangeManager.getInstance(conf); - Path cmRoot = new Path(conf.getVar(HiveConf.ConfVars.REPLCMDIR)); + Path cmRoot = new Path(conf.getVar(HiveConf.ConfVars.REPL_CM_DIR)); Long lastReplId; LOG.info("Data copy at load enabled : {}", conf.getBoolVar(HiveConf.ConfVars.REPL_RUN_DATA_COPY_TASKS_ON_TARGET)); if (isFailoverTarget) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java index 6ce83ee3e700..690e9a298a0c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/ReplLoadTask.java @@ -92,7 +92,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.LinkedList; @@ -436,8 +435,8 @@ a database ( directory ) } private boolean isReadOnlyHookRegistered() { - return conf.get(HiveConf.ConfVars.PREEXECHOOKS.varname) != null && - conf.get(HiveConf.ConfVars.PREEXECHOOKS.varname).contains(READ_ONLY_HOOK); + return conf.get(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname) != null && + conf.get(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname).contains(READ_ONLY_HOOK); } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/util/PathInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/util/PathInfo.java index 7383d018ece7..cf4c5a57c7a8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/util/PathInfo.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/bootstrap/load/util/PathInfo.java @@ -41,7 +41,7 @@ public class PathInfo { public PathInfo(HiveConf hiveConf) { this.hiveConf = hiveConf; - stagingDir = HiveConf.getVar(hiveConf, HiveConf.ConfVars.STAGINGDIR); + stagingDir = HiveConf.getVar(hiveConf, HiveConf.ConfVars.STAGING_DIR); } public Map getFsScratchDirs() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java index 61516a8604da..2c42c9b6156f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/repl/util/ReplUtils.java @@ -70,7 +70,6 @@ import org.apache.hadoop.hive.ql.parse.repl.dump.metric.IncrementalDumpMetricCollector; import org.apache.hadoop.hive.ql.parse.repl.load.metric.BootstrapLoadMetricCollector; import org.apache.hadoop.hive.ql.parse.repl.load.metric.IncrementalLoadMetricCollector; -import org.apache.hadoop.hive.ql.parse.repl.load.metric.PreOptimizedBootstrapLoadMetricCollector; import org.apache.hadoop.hive.ql.parse.repl.metric.ReplicationMetricCollector; import org.apache.hadoop.hive.ql.parse.repl.metric.event.Metadata; import org.apache.hadoop.hive.ql.parse.repl.metric.event.Status; @@ -480,7 +479,7 @@ public static boolean failedWithNonRecoverableError(Path dumpRoot, HiveConf conf } public static Path getEncodedDumpRootPath(HiveConf conf, String dbname) throws UnsupportedEncodingException { - return new Path(conf.getVar(HiveConf.ConfVars.REPLDIR), + return new Path(conf.getVar(HiveConf.ConfVars.REPL_DIR), Base64.getEncoder().encodeToString(dbname .getBytes(StandardCharsets.UTF_8.name()))); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java index b9be333761e6..09d2ff719333 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/DagUtils.java @@ -23,6 +23,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Function; +import com.google.common.base.Strings; import com.google.common.base.Preconditions; import com.google.common.collect.Iterators; import com.google.common.collect.Lists; @@ -372,28 +373,28 @@ private JobConf initializeVertexConf(JobConf baseConf, Context context, MapWork } if (mapWork.getMaxSplitSize() != null) { - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, mapWork.getMaxSplitSize().longValue()); } if (mapWork.getMinSplitSize() != null) { - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, mapWork.getMinSplitSize().longValue()); } if (mapWork.getMinSplitSizePerNode() != null) { - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZEPERNODE, + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE_PER_NODE, mapWork.getMinSplitSizePerNode().longValue()); } if (mapWork.getMinSplitSizePerRack() != null) { - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZEPERRACK, + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE_PER_RACK, mapWork.getMinSplitSizePerRack().longValue()); } Utilities.setInputAttributes(conf, mapWork); - String inpFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVETEZINPUTFORMAT); + String inpFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_TEZ_INPUT_FORMAT); if (mapWork.isUseBucketizedHiveInputFormat()) { inpFormat = BucketizedHiveInputFormat.class.getName(); @@ -665,10 +666,10 @@ private Map createPartitionerConf(String partitionerClassName, * container size isn't set. */ public static Resource getContainerResource(Configuration conf) { - int memorySizeMb = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCONTAINERSIZE); + int memorySizeMb = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_TEZ_CONTAINER_SIZE); if (memorySizeMb <= 0) { LOG.warn("No Tez container size specified by {}. 
Falling back to MapReduce container MB {}", - HiveConf.ConfVars.HIVETEZCONTAINERSIZE, MRJobConfig.MAP_MEMORY_MB); + HiveConf.ConfVars.HIVE_TEZ_CONTAINER_SIZE, MRJobConfig.MAP_MEMORY_MB); memorySizeMb = conf.getInt(MRJobConfig.MAP_MEMORY_MB, MRJobConfig.DEFAULT_MAP_MEMORY_MB); // When config is explicitly set to "-1" defaultValue does not work! if (memorySizeMb <= 0) { @@ -676,17 +677,19 @@ public static Resource getContainerResource(Configuration conf) { memorySizeMb = MRJobConfig.DEFAULT_MAP_MEMORY_MB; } } - int cpuCores = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCPUVCORES); + int cpuCores = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_TEZ_CPU_VCORES); if (cpuCores <= 0) { LOG.warn("No Tez VCore size specified by {}. Falling back to MapReduce container VCores {}", - HiveConf.ConfVars.HIVETEZCPUVCORES, MRJobConfig.MAP_CPU_VCORES); + HiveConf.ConfVars.HIVE_TEZ_CPU_VCORES, MRJobConfig.MAP_CPU_VCORES); cpuCores = conf.getInt(MRJobConfig.MAP_CPU_VCORES, MRJobConfig.DEFAULT_MAP_CPU_VCORES); if (cpuCores <= 0) { LOG.warn("Falling back to default container VCores {}", MRJobConfig.DEFAULT_MAP_CPU_VCORES); cpuCores = MRJobConfig.DEFAULT_MAP_CPU_VCORES; } } - return Resource.newInstance(memorySizeMb, cpuCores); + Resource resource = Resource.newInstance(memorySizeMb, cpuCores); + LOG.debug("Tez container resource: {}", resource); + return resource; } /* @@ -705,9 +708,9 @@ Map getContainerEnvironment(Configuration conf, boolean isMap) { * are set */ private static String getContainerJavaOpts(Configuration conf) { - String javaOpts = HiveConf.getVar(conf, HiveConf.ConfVars.HIVETEZJAVAOPTS); + String javaOpts = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_TEZ_JAVA_OPTS); - String logLevel = HiveConf.getVar(conf, HiveConf.ConfVars.HIVETEZLOGLEVEL); + String logLevel = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_TEZ_LOG_LEVEL); List logProps = Lists.newArrayList(); TezUtils.addLog4jSystemProperties(logLevel, logProps); StringBuilder sb = new StringBuilder(); @@ -716,19 +719,18 @@ private static String getContainerJavaOpts(Configuration conf) { } logLevel = sb.toString(); - if (HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVETEZCONTAINERSIZE) > 0) { - if (javaOpts != null) { - return javaOpts + " " + logLevel; - } else { - return logLevel; - } + String finalOpts = null; + if (HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_TEZ_CONTAINER_SIZE) > 0) { + finalOpts = Strings.nullToEmpty(javaOpts) + " " + logLevel; } else { if (javaOpts != null && !javaOpts.isEmpty()) { - LOG.warn(HiveConf.ConfVars.HIVETEZJAVAOPTS + " will be ignored because " - + HiveConf.ConfVars.HIVETEZCONTAINERSIZE + " is not set!"); + LOG.warn(HiveConf.ConfVars.HIVE_TEZ_JAVA_OPTS + " will be ignored because " + + HiveConf.ConfVars.HIVE_TEZ_CONTAINER_SIZE + " is not set!"); } - return logLevel + " " + MRHelpers.getJavaOptsForMRMapper(conf); + finalOpts = logLevel + " " + MRHelpers.getJavaOptsForMRMapper(conf); } + LOG.debug("Tez container final opts: {}", finalOpts); + return finalOpts; } private Vertex createVertexFromMergeWork(JobConf conf, MergeJoinWork mergeJoinWork, @@ -1100,7 +1102,7 @@ public List localizeTempFilesFromConf( String hdfsDirPathStr, Configuration conf) throws IOException, LoginException { List tmpResources = new ArrayList(); - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEADDFILESUSEHDFSLOCATION)) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ADD_FILES_USE_HDFS_LOCATION)) { // reference HDFS based resource directly, to use distribute cache efficiently. 
addHdfsResource(conf, tmpResources, LocalResourceType.FILE, getHdfsTempFilesFromConf(conf)); // local resources are session based. @@ -1146,7 +1148,7 @@ private static String[] getHdfsTempFilesFromConf(Configuration conf) { private static String[] getLocalTempFilesFromConf(Configuration conf) { String addedFiles = Utilities.getLocalResourceFiles(conf, SessionState.ResourceType.FILE); String addedJars = Utilities.getLocalResourceFiles(conf, SessionState.ResourceType.JAR); - String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS); + String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_AUX_JARS); String reloadableAuxJars = SessionState.get() == null ? null : SessionState.get().getReloadableAuxJars(); String allFiles = HiveStringUtils.joinIgnoringEmpty(new String[]{auxJars, reloadableAuxJars, addedJars, addedFiles}, ','); @@ -1159,13 +1161,13 @@ public static String[] getTempFilesFromConf(Configuration conf) { } String addedFiles = Utilities.getResourceFiles(conf, SessionState.ResourceType.FILE); if (StringUtils.isNotBlank(addedFiles)) { - HiveConf.setVar(conf, ConfVars.HIVEADDEDFILES, addedFiles); + HiveConf.setVar(conf, ConfVars.HIVE_ADDED_FILES, addedFiles); } String addedJars = Utilities.getResourceFiles(conf, SessionState.ResourceType.JAR); if (StringUtils.isNotBlank(addedJars)) { - HiveConf.setVar(conf, ConfVars.HIVEADDEDJARS, addedJars); + HiveConf.setVar(conf, ConfVars.HIVE_ADDED_JARS, addedJars); } - String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS); + String auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_AUX_JARS); String reloadableAuxJars = SessionState.get() == null ? null : SessionState.get().getReloadableAuxJars(); // need to localize the additional jars and files @@ -1178,7 +1180,7 @@ public static String[] getTempFilesFromConf(Configuration conf) { private static String[] getTempArchivesFromConf(Configuration conf) { String addedArchives = Utilities.getResourceFiles(conf, SessionState.ResourceType.ARCHIVE); if (StringUtils.isNotBlank(addedArchives)) { - HiveConf.setVar(conf, ConfVars.HIVEADDEDARCHIVES, addedArchives); + HiveConf.setVar(conf, ConfVars.HIVE_ADDED_ARCHIVES, addedArchives); return addedArchives.split(","); } return new String[0]; @@ -1272,7 +1274,7 @@ public String getExecJarPathLocal(Configuration configuration) { } } catch (Exception ignored) {} //Fall back to hive config, if the uri could not get, or it does not point to a .jar file - String jar = configuration.get(ConfVars.HIVEJAR.varname); + String jar = configuration.get(ConfVars.HIVE_JAR.varname); if (!StringUtils.isBlank(jar)) { return jar; } @@ -1466,7 +1468,7 @@ public JobConf createConfiguration(HiveConf hiveConf, boolean skipAMConf) throws conf.set(MRJobConfig.OUTPUT_KEY_CLASS, HiveKey.class.getName()); conf.set(MRJobConfig.OUTPUT_VALUE_CLASS, BytesWritable.class.getName()); - conf.set("mapred.partitioner.class", HiveConf.getVar(conf, HiveConf.ConfVars.HIVEPARTITIONER)); + conf.set("mapred.partitioner.class", HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_PARTITIONER)); conf.set("tez.runtime.partitioner.class", MRPartitioner.class.getName()); // Removing job credential entry/ cannot be set on the tasks @@ -1699,7 +1701,7 @@ public String createDagName(Configuration conf, QueryPlan plan) { } public static String getUserSpecifiedDagName(Configuration conf) { - String name = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYNAME); + String name = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_NAME); return (name != null) ? 
name : conf.get("mapred.job.name"); } @@ -1712,7 +1714,7 @@ public static String getUserSpecifiedDagName(Configuration conf) { * TODO This method is temporary. Ideally Hive should only need to pass to Tez the amount of memory * it requires to do the map join, and Tez should take care of figuring out how much to allocate * Adjust the percentage of memory to be reserved for the processor from Tez - * based on the actual requested memory by the Map Join, i.e. HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD + * based on the actual requested memory by the Map Join, i.e. HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD * @return the adjusted percentage */ static double adjustMemoryReserveFraction(long memoryRequested, HiveConf conf) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java index 023d4d371d6c..8df866a9ca78 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HashTableLoader.java @@ -79,7 +79,7 @@ public void init(ExecMapperContext context, MapredContext mrContext, Configurati this.hconf = hconf; this.desc = joinOp.getConf(); this.cacheKey = joinOp.getCacheKey(); - String counterGroup = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVECOUNTERGROUP); + String counterGroup = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_COUNTER_GROUP); String vertexName = hconf.get(Operator.CONTEXT_NAME_KEY, ""); String counterName = Utilities.getVertexCounterName(HashTableLoaderCounters.HASHTABLE_LOAD_TIME_MS.name(), vertexName); this.htLoadCounter = tezContext.getTezProcessorContext().getCounters().findCounter(counterGroup, counterName); @@ -100,7 +100,7 @@ public void load(MapJoinTableContainer[] mapJoinTables, } boolean useOptimizedTables = HiveConf.getBoolVar( - hconf, HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE); + hconf, HiveConf.ConfVars.HIVE_MAPJOIN_USE_OPTIMIZED_TABLE); boolean useHybridGraceHashJoin = desc.isHybridHashJoin(); boolean isFirstKey = true; @@ -109,13 +109,13 @@ public void load(MapJoinTableContainer[] mapJoinTables, LOG.info("Memory manager allocates " + totalMapJoinMemory + " bytes for the loading hashtable."); if (totalMapJoinMemory <= 0) { totalMapJoinMemory = HiveConf.getLongVar( - hconf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD); + hconf, HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD); } long processMaxMemory = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax(); if (totalMapJoinMemory > processMaxMemory) { float hashtableMemoryUsage = HiveConf.getFloatVar( - hconf, HiveConf.ConfVars.HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE); + hconf, HiveConf.ConfVars.HIVE_HASHTABLE_FOLLOWBY_GBY_MAX_MEMORY_USAGE); LOG.warn("totalMapJoinMemory value of " + totalMapJoinMemory + " is greater than the max memory size of " + processMaxMemory); // Don't want to attempt to grab more memory than we have available .. 
percentage is a bit arbitrary @@ -153,8 +153,8 @@ public void load(MapJoinTableContainer[] mapJoinTables, int numPartitions = 0; try { numPartitions = HybridHashTableContainer.calcNumPartitions(memory, maxSize, - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS), - HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINWBSIZE)); + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_MIN_NUM_PARTITIONS), + HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_MIN_WB_SIZE)); } catch (IOException e) { throw new HiveException(e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java index 2a68b20c27b8..46df46bbdf66 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/HiveSplitGenerator.java @@ -23,14 +23,11 @@ import java.util.Arrays; import java.util.BitSet; import java.util.Comparator; -import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.List; import java.util.Optional; import java.util.Set; -import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import org.apache.hadoop.fs.BlockLocation; @@ -191,7 +188,7 @@ public List initialize() throws Exception { int availableSlots = getAvailableSlotsCalculator().getAvailableSlots(); - if (HiveConf.getLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 1) <= 1) { + if (HiveConf.getLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 1) <= 1) { // broken configuration from mapred-default.xml final long blockSize = conf.getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT); @@ -199,7 +196,7 @@ public List initialize() throws Exception { TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_MIN_SIZE, TezMapReduceSplitsGrouper.TEZ_GROUPING_SPLIT_MIN_SIZE_DEFAULT); final long preferredSplitSize = Math.min(blockSize / 2, minGrouping); - HiveConf.setLongVar(jobConf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, preferredSplitSize); + HiveConf.setLongVar(jobConf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, preferredSplitSize); LOG.info("The preferred split size is " + preferredSplitSize); } @@ -216,7 +213,7 @@ public List initialize() throws Exception { InputSplit[] splits; if (generateSingleSplit && - conf.get(HiveConf.ConfVars.HIVETEZINPUTFORMAT.varname).equals(HiveInputFormat.class.getName())) { + conf.get(HiveConf.ConfVars.HIVE_TEZ_INPUT_FORMAT.varname).equals(HiveInputFormat.class.getName())) { MapWork mapWork = Utilities.getMapWork(jobConf); List paths = Utilities.getInputPathsTez(jobConf, mapWork); FileSystem fs = paths.get(0).getFileSystem(jobConf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java index a1593cc8e372..5a31f22b200a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MapRecordProcessor.java @@ -99,7 +99,7 @@ public class MapRecordProcessor extends RecordProcessor { public MapRecordProcessor(final JobConf jconf, final ProcessorContext context) throws Exception { super(jconf, context); - String queryId = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVE_QUERY_ID); if (LlapProxy.isDaemon()) { setLlapOfFragmentId(context); } diff 
--git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java index 2e87b3e44344..6c2fb4be7217 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/MergeFileRecordProcessor.java @@ -93,7 +93,7 @@ void init( .initialize(); } - String queryId = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVE_QUERY_ID); cache = ObjectCacheFactory.getCache(jconf, queryId, true); try { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java index 39c098b13738..c46082988582 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/ReduceRecordProcessor.java @@ -86,7 +86,7 @@ public class ReduceRecordProcessor extends RecordProcessor { public ReduceRecordProcessor(final JobConf jconf, final ProcessorContext context) throws Exception { super(jconf, context); - String queryId = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(jconf, HiveConf.ConfVars.HIVE_QUERY_ID); cache = ObjectCacheFactory.getCache(jconf, queryId, true); dynamicValueCache = ObjectCacheFactory.getCache(jconf, queryId, false, true); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezAvailableSlotsCalculator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezAvailableSlotsCalculator.java index 731ad0942f81..e0117a08ba6d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezAvailableSlotsCalculator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezAvailableSlotsCalculator.java @@ -20,11 +20,15 @@ import org.apache.hadoop.conf.Configuration; import org.apache.tez.runtime.api.InputInitializerContext; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Default implementation of AvailableSlotsCalculator which relies on available capacity of the cluster */ public class TezAvailableSlotsCalculator implements AvailableSlotsCalculator { + private static final Logger LOG = LoggerFactory.getLogger(TezAvailableSlotsCalculator.class); + private InputInitializerContext inputInitializerContext; @Override public void initialize(Configuration conf, HiveSplitGenerator splitGenerator) { @@ -39,6 +43,9 @@ public int getAvailableSlots() { } int totalResource = inputInitializerContext.getTotalAvailableResource().getMemory(); int taskResource = inputInitializerContext.getVertexTaskResource().getMemory(); - return totalResource / taskResource; + int availableSlots = totalResource / taskResource; + LOG.debug("totalResource: {}mb / taskResource: {}mb = availableSlots: {}", totalResource, taskResource, + availableSlots); + return availableSlots; } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/ContextNode.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezRuntimeException.java similarity index 58% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/ContextNode.java rename to ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezRuntimeException.java index f983430f4a1f..a95b94a56812 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/ContextNode.java +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezRuntimeException.java @@ -15,28 +15,24 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.txn; +package org.apache.hadoop.hive.ql.exec.tez; + +import org.apache.hadoop.hive.ql.metadata.HiveException; /** - * Used for storing values in {@link ThreadLocal}. By using the {@link #getParent()} property, it is possible to have a - * stack-like set of values to support embedded/nested contexts. - * @param The type of the value to store + * An exception class to be thrown by TezTask to provide further details for certain parts of Hive. */ -public class ContextNode { - - private final ContextNode parent; - private final T value; +public class TezRuntimeException extends HiveException { + private static final long serialVersionUID = 1L; - public ContextNode(ContextNode parent, T value) { - this.parent = parent; - this.value = value; - } + private String dagId = null; - public ContextNode getParent() { - return parent; + public TezRuntimeException(String dagId, String diagnostics) { + super(diagnostics); + this.dagId = dagId; } - public T getValue() { - return value; + public String getDagId() { + return dagId; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java index 0cf6bf0e313c..33d4210fb226 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPool.java @@ -298,7 +298,7 @@ private void configureAmRegistry(SessionType session) { bySessionId.put(session.getSessionId(), session); HiveConf conf = session.getConf(); conf.set(ConfVars.LLAP_TASK_SCHEDULER_AM_REGISTRY_NAME.varname, amRegistryName); - conf.set(ConfVars.HIVESESSIONID.varname, session.getSessionId()); + conf.set(ConfVars.HIVE_SESSION_ID.varname, session.getSessionId()); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java index 7fbd1573ee77..c87c968b5c8f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionPoolManager.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hive.ql.exec.tez; -import org.apache.hadoop.hive.ql.exec.tez.TezSessionState.HiveResources; - import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -278,7 +276,7 @@ private TezSessionState getSession(HiveConf conf, boolean doOpen) throws Excepti // TODO Session re-use completely disabled for doAs=true. Always launches a new session. 
boolean nonDefaultUser = conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS); - boolean jobNameSet = !HiveConf.getVar(conf, ConfVars.HIVETEZJOBNAME).equals("HIVE-%s"); + boolean jobNameSet = !HiveConf.getVar(conf, ConfVars.HIVE_TEZ_JOB_NAME).equals("HIVE-%s"); /* * if the user has specified a queue name themselves or job name is set, we create a new diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java index 015e826d8e0f..c2a9ae5203b1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java @@ -363,7 +363,7 @@ protected void openInternal(String[] additionalFilesNotFromConf, */ HiveConfUtil.updateCredentialProviderPasswordForJobs(tezConfig); - String tezJobNameFormat = HiveConf.getVar(conf, ConfVars.HIVETEZJOBNAME); + String tezJobNameFormat = HiveConf.getVar(conf, ConfVars.HIVE_TEZ_JOB_NAME); final TezClient session = TezClient.newBuilder(String.format(tezJobNameFormat, sessionId), tezConfig) .setIsSession(true).setLocalResources(commonLocalResources) .setCredentials(llapCredentials).setServicePluginDescriptor(servicePluginsDescriptor) @@ -608,7 +608,7 @@ private void setupSessionAcls(Configuration tezConf, HiveConf hiveConf) throws String loginUser = loginUserUgi == null ? null : loginUserUgi.getShortUserName(); boolean addHs2User = - HiveConf.getBoolVar(hiveConf, ConfVars.HIVETEZHS2USERACCESS); + HiveConf.getBoolVar(hiveConf, ConfVars.HIVE_TEZ_HS2_USER_ACCESS); String viewStr = Utilities.getAclStringWithHiveModification(tezConf, TezConfiguration.TEZ_AM_VIEW_ACLS, addHs2User, user, loginUser); @@ -794,12 +794,12 @@ private Path createTezDir(String sessionId, String suffix) throws IOException { // tez needs its own scratch dir (per session) // TODO: De-link from SessionState. A TezSession can be linked to different Hive Sessions via the pool. SessionState sessionState = SessionState.get(); - String hdfsScratchDir = sessionState == null ? HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR) : sessionState + String hdfsScratchDir = sessionState == null ? HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR) : sessionState .getHdfsScratchDirURIString(); Path tezDir = new Path(hdfsScratchDir, TEZ_DIR); tezDir = new Path(tezDir, sessionId + ((suffix == null) ? "" : ("-" + suffix))); FileSystem fs = tezDir.getFileSystem(conf); - FsPermission fsPermission = new FsPermission(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION)); + FsPermission fsPermission = new FsPermission(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR_PERMISSION)); fs.mkdirs(tezDir, fsPermission); // Make sure the path is normalized (we expect validation to pass since we just created it). tezDir = DagUtils.validateTargetDir(tezDir, conf).getPath(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java index 32942ef98a71..57f2a0c05f50 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezTask.java @@ -148,7 +148,7 @@ public int execute() { Context ctx = null; Ref sessionRef = Ref.from(null); - final String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID); + final String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID); try { // Get or create Context object. If we create it we have to clean it later as well. 
@@ -258,11 +258,11 @@ public int execute() { LogUtils.putToMDC(LogUtils.DAGID_KEY, dagId); // finally monitor will print progress until the job is done - TezJobMonitor monitor = new TezJobMonitor(work.getAllWork(), dagClient, conf, dag, ctx, counters); + TezJobMonitor monitor = new TezJobMonitor(work.getAllWork(), dagClient, conf, dag, ctx, counters, perfLogger); rc = monitor.monitorExecution(); if (rc != 0) { - this.setException(new HiveException(monitor.getDiagnostics())); + this.setException(new TezRuntimeException(dagId, monitor.getDiagnostics())); } try { @@ -397,7 +397,7 @@ private void collectCommitInformation(TezWork work) throws IOException, TezExcep private void updateNumRows() { if (counters != null) { TezCounter counter = counters.findCounter( - conf.getVar(HiveConf.ConfVars.HIVECOUNTERGROUP), FileSinkOperator.TOTAL_TABLE_ROWS_WRITTEN); + conf.getVar(HiveConf.ConfVars.HIVE_COUNTER_GROUP), FileSinkOperator.TOTAL_TABLE_ROWS_WRITTEN); if (counter != null) { queryState.setNumModifiedRows(counter.getValue()); } @@ -486,8 +486,8 @@ DAG build(JobConf conf, TezWork tezWork, Path scratchDir, Context ctx, .put("description", ctx.getCmd()); String dagInfo = json.toString(); - String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID); - dag.setConf(HiveConf.ConfVars.HIVEQUERYID.varname, queryId); + String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID); + dag.setConf(HiveConf.ConfVars.HIVE_QUERY_ID.varname, queryId); LOG.debug("DagInfo: {}", dagInfo); @@ -596,7 +596,7 @@ private static void setAccessControlsForCurrentUser(DAG dag, String queryId, String loginUser = loginUserUgi == null ? null : loginUserUgi.getShortUserName(); boolean addHs2User = - HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVETEZHS2USERACCESS); + HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_TEZ_HS2_USER_ACCESS); // Temporarily re-using the TEZ AM View ACLs property for individual dag access control. // Hive may want to setup it's own parameters if it wants to control per dag access. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java index 33f9a8a34d26..2c71296772fd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/WorkloadManager.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import com.fasterxml.jackson.annotation.JsonAutoDetect; -import com.fasterxml.jackson.databind.MapperFeature; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.SerializationFeature; import com.google.common.annotations.VisibleForTesting; @@ -1568,7 +1567,7 @@ public WmTezSession getSession(TezSessionState session, MappingInput input, Hive WmEvent wmEvent = new WmEvent(WmEvent.EventType.GET); // Note: not actually used for pool sessions; verify some things like doAs are not set. 
validateConfig(conf); - String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID); SettableFuture future = SettableFuture.create(); WmTezSession wmSession = checkSessionForReuse(session); GetRequest req = new GetRequest( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/DAGSummary.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/DAGSummary.java index c039342d1ed8..045fb13e70be 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/DAGSummary.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/DAGSummary.java @@ -28,7 +28,6 @@ import org.apache.tez.common.counters.TezCounter; import org.apache.tez.common.counters.TezCounters; import org.apache.tez.dag.api.DAG; -import org.apache.tez.dag.api.TezConfiguration; import org.apache.tez.dag.api.TezException; import org.apache.tez.dag.api.Vertex; import org.apache.tez.dag.api.client.DAGClient; @@ -73,7 +72,7 @@ class DAGSummary implements PrintSummary { this.dagClient = dagClient; this.dag = dag; this.perfLogger = perfLogger; - this.hiveCountersGroup = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVECOUNTERGROUP); + this.hiveCountersGroup = HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_COUNTER_GROUP); this.hiveCounters = hiveCounters(dagClient); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java index 17ab6e39397a..fbbbf189e48f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/tez/monitoring/TezJobMonitor.java @@ -77,7 +77,7 @@ public class TezJobMonitor { private static final int MAX_RETRY_INTERVAL = 2500; private static final int MAX_RETRY_FAILURES = (MAX_RETRY_INTERVAL / MAX_CHECK_INTERVAL) + 1; - private final PerfLogger perfLogger = SessionState.getPerfLogger(); + private final PerfLogger perfLogger; private static final List shutdownList; private final List topSortedWorks; @@ -117,7 +117,7 @@ public static void initShutdownHook() { private final TezCounters counters; public TezJobMonitor(List topSortedWorks, final DAGClient dagClient, HiveConf conf, DAG dag, - Context ctx, final TezCounters counters) { + Context ctx, final TezCounters counters, PerfLogger perfLogger) { this.topSortedWorks = topSortedWorks; this.dagClient = dagClient; this.hiveConf = conf; @@ -126,6 +126,7 @@ public TezJobMonitor(List topSortedWorks, final DAGClient dagClient, H console = SessionState.getConsole(); updateFunction = updateFunction(); this.counters = counters; + this.perfLogger = perfLogger; } private RenderStrategy.UpdateFunction updateFunction() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java index 9cfe9fbec155..47694f241540 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorFilterOperator.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hive.ql.exec.vector.expressions.ConstantVectorExpression; import org.apache.hadoop.hive.ql.exec.vector.expressions.VectorExpression; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.FilterDesc; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import 
org.apache.hadoop.hive.ql.plan.VectorDesc; @@ -84,7 +83,7 @@ protected void initializeOp(Configuration hconf) throws HiveException { VectorExpression.doTransientInit(predicateExpression, hconf); try { heartbeatInterval = HiveConf.getIntVar(hconf, - HiveConf.ConfVars.HIVESENDHEARTBEAT); + HiveConf.ConfVars.HIVE_SEND_HEARTBEAT); predicateExpression.init(hconf); } catch (Throwable e) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java index c2dfaeb074eb..505db9e5e611 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/VectorGroupByOperator.java @@ -405,7 +405,7 @@ public void initialize(Configuration hconf) throws HiveException { this.maxHtEntries = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_VECTORIZATION_GROUPBY_MAXENTRIES); this.numRowsCompareHashAggr = HiveConf.getIntVar(hconf, - HiveConf.ConfVars.HIVEGROUPBYMAPINTERVAL); + HiveConf.ConfVars.HIVE_GROUPBY_MAP_INTERVAL); } else { this.percentEntriesToFlush = @@ -415,7 +415,7 @@ public void initialize(Configuration hconf) throws HiveException { this.maxHtEntries = HiveConf.ConfVars.HIVE_VECTORIZATION_GROUPBY_MAXENTRIES.defaultIntVal; this.numRowsCompareHashAggr = - HiveConf.ConfVars.HIVEGROUPBYMAPINTERVAL.defaultIntVal; + HiveConf.ConfVars.HIVE_GROUPBY_MAP_INTERVAL.defaultIntVal; } minReductionHashAggr = getConf().getMinReductionHashAggr(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorInBloomFilterColDynamicValue.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorInBloomFilterColDynamicValue.java index 836c1477e4f7..c288feb8f9f9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorInBloomFilterColDynamicValue.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/VectorInBloomFilterColDynamicValue.java @@ -102,7 +102,7 @@ public void init(Configuration conf) { throw new IllegalStateException("Unsupported type " + colVectorType); } - String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID); runtimeCache = ObjectCacheFactory.getCache(conf, queryId, false, true); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTableLoader.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTableLoader.java index 460f05b0e174..cfada606eb16 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTableLoader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastHashTableLoader.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.mapjoin.MapJoinMemoryExhaustionError; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException; import org.apache.hive.common.util.FixedSizedObjectPool; import org.apache.tez.common.counters.TezCounter; import org.slf4j.Logger; @@ -90,7 +89,7 @@ public VectorMapJoinFastHashTableLoader(TezContext context, Configuration hconf, this.desc = joinOp.getConf(); this.cacheKey = joinOp.getCacheKey(); this.htLoadCounter = this.tezContext.getTezProcessorContext().getCounters().findCounter( - HiveConf.getVar(hconf, 
HiveConf.ConfVars.HIVECOUNTERGROUP), hconf.get(Operator.CONTEXT_NAME_KEY, "")); + HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_COUNTER_GROUP), hconf.get(Operator.CONTEXT_NAME_KEY, "")); } @Override @@ -100,7 +99,7 @@ public void init(ExecMapperContext context, MapredContext mrContext, this.hconf = hconf; this.desc = joinOp.getConf(); this.cacheKey = joinOp.getCacheKey(); - String counterGroup = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVECOUNTERGROUP); + String counterGroup = HiveConf.getVar(hconf, HiveConf.ConfVars.HIVE_COUNTER_GROUP); String vertexName = hconf.get(Operator.CONTEXT_NAME_KEY, ""); String counterName = Utilities.getVertexCounterName(HashTableLoaderCounters.HASHTABLE_LOAD_TIME_MS.name(), vertexName); this.htLoadCounter = tezContext.getTezProcessorContext().getCounters().findCounter(counterGroup, counterName); @@ -111,7 +110,7 @@ private void initHTLoadingService(long estKeyCount) { // Avoid many small HTs that will rehash multiple times causing GCs this.numLoadThreads = 1; } else { - int initialValue = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEMAPJOINPARALELHASHTABLETHREADS); + int initialValue = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_MAPJOIN_PARALEL_HASHTABLE_THREADS); Preconditions.checkArgument(initialValue > 0, "The number of HT-loading-threads should be positive."); int adjustedValue = Integer.highestOneBit(initialValue); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java index 1b281caf0ecd..20ae779f2215 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/fast/VectorMapJoinFastTableContainer.java @@ -62,9 +62,9 @@ public VectorMapJoinFastTableContainer(MapJoinDesc desc, Configuration hconf, this.desc = desc; this.hconf = hconf; - keyCountAdj = HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEKEYCOUNTADJUSTMENT); - threshold = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLETHRESHOLD); - loadFactor = HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVEHASHTABLELOADFACTOR); + keyCountAdj = HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_KEY_COUNT_ADJUSTMENT); + threshold = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_THRESHOLD); + loadFactor = HiveConf.getFloatVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_LOAD_FACTOR); this.numHTs = numHTs; this.estimatedKeyCount = estimatedKeys > numHTs ? 
(estimatedKeys/ numHTs) : estimatedKeys; @@ -98,7 +98,7 @@ private VectorMapJoinFastHashTableContainerBase createHashTables(int newThreshol boolean isFullOuter = vectorDesc.getIsFullOuter(); boolean minMaxEnabled = vectorDesc.getMinMaxEnabled(); - int writeBufferSize = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVEHASHTABLEWBSIZE); + int writeBufferSize = HiveConf.getIntVar(hconf, HiveConf.ConfVars.HIVE_HASHTABLE_WB_SIZE); VectorMapJoinFastHashTableContainerBase htWrapper = null; switch (hashTableKeyType) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedCreateHashTable.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedCreateHashTable.java index 4171f2038c29..dff1f51da603 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedCreateHashTable.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/vector/mapjoin/optimized/VectorMapJoinOptimizedCreateHashTable.java @@ -55,6 +55,7 @@ public static VectorMapJoinOptimizedHashTable createHashTable(MapJoinDesc desc, case SHORT: case INT: case LONG: + case DATE: switch (hashTableKind) { case HASH_MAP: hashTable = new VectorMapJoinOptimizedLongHashMap( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java index 205a5464f1ed..5261a1beb1c5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/history/HiveHistoryImpl.java @@ -21,7 +21,6 @@ import java.io.File; import java.io.IOException; import java.io.PrintWriter; -import java.io.Serializable; import java.util.HashMap; import java.util.Map; import java.util.Random; @@ -80,7 +79,7 @@ public HiveHistoryImpl(SessionState ss) { try { console = new LogHelper(LOG); String conf_file_loc = ss.getConf().getVar( - HiveConf.ConfVars.HIVEHISTORYFILELOC); + HiveConf.ConfVars.HIVE_HISTORY_FILE_LOC); if ((conf_file_loc == null) || conf_file_loc.length() == 0) { console.printError("No history file location given"); return; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java index 74d6ac4ce017..c0e3bb30054c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HiveProtoLoggingHook.java @@ -426,7 +426,7 @@ private HiveHookEventProtoPartialBuilder getPreHookEvent(HookContext hookContext plan.getOptimizedQueryString(), plan.getOptimizedCBOPlan()); return new HiveHookEventProtoPartialBuilder( - builder, explainWork, otherInfo, plan.getQueryStr(), conf.getVar(ConfVars.HIVESTAGEIDREARRANGE)); + builder, explainWork, otherInfo, plan.getQueryStr(), conf.getVar(ConfVars.HIVE_STAGE_ID_REARRANGE)); } private HiveHookEventProtoPartialBuilder getPostHookEvent(HookContext hookContext, boolean success) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java index cd23b247063a..f3fc63ac3e4b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/HookContext.java @@ -49,11 +49,11 @@ public class HookContext { static public enum HookType { - PRE_EXEC_HOOK(HiveConf.ConfVars.PREEXECHOOKS, ExecuteWithHookContext.class, + PRE_EXEC_HOOK(HiveConf.ConfVars.PRE_EXEC_HOOKS, ExecuteWithHookContext.class, 
"Pre-execution hooks to be invoked for each statement"), - POST_EXEC_HOOK(HiveConf.ConfVars.POSTEXECHOOKS, ExecuteWithHookContext.class, + POST_EXEC_HOOK(HiveConf.ConfVars.POST_EXEC_HOOKS, ExecuteWithHookContext.class, "Post-execution hooks to be invoked for each statement"), - ON_FAILURE_HOOK(HiveConf.ConfVars.ONFAILUREHOOKS, ExecuteWithHookContext.class, + ON_FAILURE_HOOK(HiveConf.ConfVars.ON_FAILURE_HOOKS, ExecuteWithHookContext.class, "On-failure hooks to be invoked for each statement"), QUERY_LIFETIME_HOOKS(HiveConf.ConfVars.HIVE_QUERY_LIFETIME_HOOKS, QueryLifeTimeHook.class, "Hooks that will be triggered before/after query compilation and before/after query execution"), @@ -61,7 +61,7 @@ static public enum HookType { "Hooks that invoked before/after Hive performs its own semantic analysis on a statement"), DRIVER_RUN_HOOKS(HiveConf.ConfVars.HIVE_DRIVER_RUN_HOOKS, HiveDriverRunHook.class, "Hooks that Will be run at the beginning and end of Driver.run"), - QUERY_REDACTOR_HOOKS(HiveConf.ConfVars.QUERYREDACTORHOOKS, Redactor.class, + QUERY_REDACTOR_HOOKS(HiveConf.ConfVars.QUERY_REDACTOR_HOOKS, Redactor.class, "Hooks to be invoked for each query which can transform the query before it's placed in the job.xml file"), // The HiveSessionHook.class cannot access, use Hook.class instead HIVE_SERVER2_SESSION_HOOK(HiveConf.ConfVars.HIVE_SERVER2_SESSION_HOOK, Hook.class, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java index 102b2b517312..7633ac85612f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/PostExecTezSummaryPrinter.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hive.ql.exec.tez.CompileTimeCounters; import org.apache.hadoop.hive.ql.exec.tez.HiveInputCounters; import org.apache.tez.common.counters.FileSystemCounter; -import org.apache.tez.dag.api.client.DAGClient; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.conf.HiveConf; @@ -62,7 +61,7 @@ public void run(HookContext hookContext) throws Exception { LOG.info("Printing summary for tez task: " + tezTask.getName()); TezCounters counters = tezTask.getTezCounters(); if (counters != null) { - String hiveCountersGroup = HiveConf.getVar(conf, HiveConf.ConfVars.HIVECOUNTERGROUP); + String hiveCountersGroup = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_COUNTER_GROUP); for (CounterGroup group : counters) { if (hiveCountersGroup.equals(group.getDisplayName())) { console.printInfo(tezTask.getId() + " HIVE COUNTERS:", false); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java index febe8a2b1e90..4c63a74d853e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidUtils.java @@ -92,7 +92,7 @@ import org.apache.hadoop.hive.metastore.api.TxnType; import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.txn.CompactionState; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionState; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.ErrorMsg; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java index 
987411401e4f..ab90142162cf 100755 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java @@ -553,7 +553,7 @@ private void addSplitsForGroup(List dirs, TableScanOperator tableScan, Job } else { // if the input is Compressed OR not text we have no way of splitting them! // In that case RecordReader should take care of header/footer skipping! - HiveConf.setLongVar(conf, ConfVars.MAPREDMINSPLITSIZE, Long.MAX_VALUE); + HiveConf.setLongVar(conf, ConfVars.MAPRED_MIN_SPLIT_SIZE, Long.MAX_VALUE); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java b/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java index 6eba2e5b71a6..90bd7339a71d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/RCFile.java @@ -1057,7 +1057,7 @@ void init(Configuration conf, FSDataOutputStream out, this.out = out; this.codec = codec; this.metadata = metadata; - this.useNewMagic = conf.getBoolean(HIVEUSEEXPLICITRCFILEHEADER.varname, true); + this.useNewMagic = conf.getBoolean(HIVE_USE_EXPLICIT_RCFILE_HEADER.varname, true); } /** Returns the compression codec of data in this file. */ diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileRecordReader.java index 6d93de0b29c4..d6ae00bfa8d4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileRecordReader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/RCFileRecordReader.java @@ -102,7 +102,7 @@ public RCFileRecordReader(Configuration conf, FileSplit split) this.conf = conf; this.split = split; - useCache = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEUSERCFILESYNCCACHE); + useCache = HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_USE_RCFILE_SYNC_CACHE); if (split.getStart() > in.getPosition()) { long oldSync = useCache ? 
syncCache.get(split) : -1; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java b/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java index 7d808c25d254..dac1393976d6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/merge/MergeFileTask.java @@ -118,7 +118,7 @@ public int execute() { String jobName = null; if (noName && this.getQueryPlan() != null) { - int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH); + int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVE_JOBNAME_LENGTH); jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(), maxlen - 6); } @@ -137,9 +137,9 @@ public int execute() { Utilities.setMapWork(job, work, ctx.getMRTmpPath(), true); // remove pwd from conf file so that job tracker doesn't show this logs - String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD); + String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTORE_PWD); if (pwd != null) { - HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE"); + HiveConf.setVar(job, HiveConf.ConfVars.METASTORE_PWD, "HIVE"); } // submit the job diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java index c19c49f266bc..bae96b1b67bc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/ExternalCache.java @@ -43,7 +43,6 @@ import org.apache.hadoop.hive.ql.io.sarg.SearchArgumentFactory; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.shims.HadoopShims.HdfsFileStatusWithId; -import org.apache.orc.OrcProto; import org.apache.orc.impl.OrcTail; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -98,7 +97,7 @@ public boolean hasPpd() { public void configure(HiveConf queryConfig) { this.conf = queryConfig; this.sarg = ConvertAstToSearchArg.createFromConf(conf); - this.isPpdEnabled = HiveConf.getBoolVar(conf, ConfVars.HIVEOPTINDEXFILTER) + this.isPpdEnabled = HiveConf.getBoolVar(conf, ConfVars.HIVE_OPT_INDEX_FILTER) && HiveConf.getBoolVar(conf, ConfVars.HIVE_ORC_MS_FOOTER_CACHE_PPD); this.isInTest = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST); this.sargIsOriginal = this.sargNotIsOriginal = null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java index 730ede4e5a02..564836144f0b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hive.ql.io.orc; import org.apache.commons.collections.CollectionUtils; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; import org.apache.hadoop.hive.common.BlobStorageUtils; import org.apache.hadoop.hive.common.NoDynamicValuesException; @@ -333,7 +332,7 @@ public static int getRootColumn(boolean isOriginal) { public static void raiseAcidTablesMustBeReadWithAcidReaderException(Configuration conf) throws IOException { - String hiveInputFormat = HiveConf.getVar(conf, ConfVars.HIVEINPUTFORMAT); + String hiveInputFormat = HiveConf.getVar(conf, ConfVars.HIVE_INPUT_FORMAT); if (hiveInputFormat.equals(HiveInputFormat.class.getName())) { throw new IOException(ErrorMsg.ACID_TABLES_MUST_BE_READ_WITH_ACID_READER.getErrorCodedMsg()); } else { @@ -717,8 +716,8 @@ static class Context 
{ this.isVectorMode = Utilities.getIsVectorized(conf); this.forceThreadpool = HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST); this.sarg = ConvertAstToSearchArg.createFromConf(conf); - minSize = HiveConf.getLongVar(conf, ConfVars.MAPREDMINSPLITSIZE, DEFAULT_MIN_SPLIT_SIZE); - maxSize = HiveConf.getLongVar(conf, ConfVars.MAPREDMAXSPLITSIZE, DEFAULT_MAX_SPLIT_SIZE); + minSize = HiveConf.getLongVar(conf, ConfVars.MAPRED_MIN_SPLIT_SIZE, DEFAULT_MIN_SPLIT_SIZE); + maxSize = HiveConf.getLongVar(conf, ConfVars.MAPRED_MAX_SPLIT_SIZE, DEFAULT_MAX_SPLIT_SIZE); String ss = conf.get(ConfVars.HIVE_ORC_SPLIT_STRATEGY.varname); if (ss == null || ss.equals(SplitStrategyKind.HYBRID.name())) { splitStrategyKind = SplitStrategyKind.HYBRID; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java index cf73c9551ebd..3710ee71c7c9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java @@ -359,7 +359,7 @@ private static TypeDescription getTypeDescriptionFromTableProperties(Properties writerOptions.getConfiguration().set(OrcConf.DICTIONARY_KEY_SIZE_THRESHOLD.getAttribute(), "-1.0"); } } - if(!HiveConf.getBoolVar(options.getConfiguration(), HiveConf.ConfVars.HIVETESTMODEACIDKEYIDXSKIP)) { + if(!HiveConf.getBoolVar(options.getConfiguration(), HiveConf.ConfVars.HIVE_TEST_MODE_ACID_KEY_IDX_SKIP)) { writerOptions.fileSystem(fs).callback(indexBuilder); } rowInspector = (StructObjectInspector)options.getInspector(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java index 881d56cd31f2..a3a123ee00cb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java @@ -412,8 +412,9 @@ public List readSchema(Configuration conf, String file) throws SerD FileMetaData metadata; try { HadoopInputFile inputFile = HadoopInputFile.fromPath(new Path(file), conf); - ParquetFileReader reader = ParquetFileReader.open(inputFile); - metadata = reader.getFileMetaData(); + try(ParquetFileReader reader = ParquetFileReader.open(inputFile)) { + metadata = reader.getFileMetaData(); + } } catch (Exception e) { throw new SerDeException(ErrorMsg.PARQUET_FOOTER_ERROR.getErrorCodedMsg(), e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java index fe98dd0aee4e..43a47a95bb5b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/rcfile/truncate/ColumnTruncateTask.java @@ -78,7 +78,7 @@ public boolean requireLock() { */ @Override public int execute() { - HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, + HiveConf.setVar(job, HiveConf.ConfVars.HIVE_INPUT_FORMAT, BucketizedHiveInputFormat.class.getName()); success = true; HiveFileFormatUtils.prepareJobOutput(job); @@ -105,16 +105,16 @@ public int execute() { job.setBoolean(MRJobConfig.MAP_SPECULATIVE, false); if (work.getMinSplitSize() != null) { - HiveConf.setLongVar(job, HiveConf.ConfVars.MAPREDMINSPLITSIZE, work + HiveConf.setLongVar(job, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, work .getMinSplitSize().longValue()); } if (work.getInputformat() != null) { - 
HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, work + HiveConf.setVar(job, HiveConf.ConfVars.HIVE_INPUT_FORMAT, work .getInputformat()); } - String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT); + String inpFormat = HiveConf.getVar(job, HiveConf.ConfVars.HIVE_INPUT_FORMAT); LOG.info("Using " + inpFormat); try { @@ -146,7 +146,7 @@ public int execute() { String jobName = null; if (noName && this.getQueryPlan() != null) { - int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH); + int maxlen = conf.getIntVar(HiveConf.ConfVars.HIVE_JOBNAME_LENGTH); jobName = Utilities.abbreviate(this.getQueryPlan().getQueryStr(), maxlen - 6); } @@ -166,9 +166,9 @@ public int execute() { // remove the pwd from conf file so that job tracker doesn't show this // logs - String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTOREPWD); + String pwd = HiveConf.getVar(job, HiveConf.ConfVars.METASTORE_PWD); if (pwd != null) { - HiveConf.setVar(job, HiveConf.ConfVars.METASTOREPWD, "HIVE"); + HiveConf.setVar(job, HiveConf.ConfVars.METASTORE_PWD, "HIVE"); } JobClient jc = new JobClient(job); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index b60570b1ec73..c4013fc2c789 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -268,7 +268,7 @@ long openTxn(Context ctx, String user, TxnType txnType, long delay) throws LockE shouldReallocateWriteIds = false; isExplicitTransaction = false; startTransactionCount = 0; - this.queryId = ctx.getConf().get(HiveConf.ConfVars.HIVEQUERYID.varname); + this.queryId = ctx.getConf().get(HiveConf.ConfVars.HIVE_QUERY_ID.varname); LOG.info("Opened " + JavaUtils.txnIdToString(txnId)); ctx.setHeartbeater(startHeartbeat(delay)); return txnId; @@ -727,7 +727,7 @@ private Heartbeater startHeartbeat(long initialDelay) throws LockException { private ScheduledFuture startHeartbeat(long initialDelay, long heartbeatInterval, Runnable heartbeater) { // For negative testing purpose.. - if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILHEARTBEATER)) { + if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && conf.getBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_HEARTBEATER)) { initialDelay = 0; } else if (initialDelay == 0) { /*make initialDelay a random number in [0, 0.75*heartbeatInterval] so that if a lot @@ -1120,8 +1120,8 @@ public LockException getLockException() { public void run() { try { // For negative testing purpose.. 
- if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILHEARTBEATER)) { - throw new LockException(HiveConf.ConfVars.HIVETESTMODEFAILHEARTBEATER.name() + "=true"); + if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && conf.getBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_HEARTBEATER)) { + throw new LockException(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_HEARTBEATER.name() + "=true"); } LOG.debug("Heartbeating...for currentUser: " + currentUser); currentUser.doAs((PrivilegedExceptionAction) () -> { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 5022b77fc368..23aea1d31810 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -40,8 +40,8 @@ import static org.apache.hadoop.hive.ql.ddl.DDLUtils.isIcebergStatsSource; import static org.apache.hadoop.hive.ql.ddl.DDLUtils.isIcebergTable; import static org.apache.hadoop.hive.ql.io.AcidUtils.getFullTableName; -import static org.apache.hadoop.hive.ql.metadata.HiveRelOptMaterialization.RewriteAlgorithm.CALCITE; -import static org.apache.hadoop.hive.ql.metadata.HiveRelOptMaterialization.RewriteAlgorithm.ALL; +import static org.apache.hadoop.hive.ql.metadata.RewriteAlgorithm.CALCITE; +import static org.apache.hadoop.hive.ql.metadata.RewriteAlgorithm.ALL; import static org.apache.hadoop.hive.ql.optimizer.calcite.rules.views.HiveMaterializedViewUtils.extractTable; import static org.apache.hadoop.hive.serde.serdeConstants.SERIALIZATION_FORMAT; import static org.apache.hadoop.hive.serde.serdeConstants.STRING_TYPE_NAME; @@ -2236,7 +2236,7 @@ public HiveRelOptMaterialization getMaterializedViewForRebuild(String dbName, St private List getValidMaterializedViews(List materializedViewTables, Set tablesUsed, boolean forceMVContentsUpToDate, boolean expandGroupingSets, - HiveTxnManager txnMgr, EnumSet scope) + HiveTxnManager txnMgr, EnumSet scope) throws HiveException { final String validTxnsList = conf.get(ValidTxnList.VALID_TXNS_KEY); final boolean tryIncrementalRewriting = @@ -2801,7 +2801,7 @@ private Partition loadPartitionInternal(Path loadPath, Table tbl, Map()); - if (this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + if (this.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER)) { StatsSetupConst.setStatsStateForCreateTable(newTPart.getParameters(), MetaStoreUtils.getColumnNames(tbl.getCols()), StatsSetupConst.TRUE); } @@ -3020,7 +3020,7 @@ private void setStatsPropAndAlterPartitions(boolean resetStatistics, Table tbl, List partitions, AcidUtils.TableSnapshot tableSnapshot) throws TException { - if (partitions.isEmpty() || conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER)) { + if (partitions.isEmpty() || conf.getBoolVar(ConfVars.HIVE_STATS_AUTOGATHER)) { return; } EnvironmentContext ec = new EnvironmentContext(); @@ -3371,8 +3371,8 @@ public Map, Partition> loadDynamicPartitions(final LoadTable LOG.debug("Cancelling " + futures.size() + " dynamic loading tasks"); executor.shutdownNow(); } - if (HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) && HiveConf.getBoolVar(conf, ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION)) { - throw new HiveException(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION.name() + "=true"); + if (HiveConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) && HiveConf.getBoolVar(conf, ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION)) { + throw new 
HiveException(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION.name() + "=true"); } try { if (isTxnTable) { @@ -3520,7 +3520,7 @@ public void loadTable(Path loadPath, String tableName, LoadFileType loadFileType // If there is no column stats gather stage present in the plan. So we don't know the accuracy of the stats or // auto gather stats is turn off explicitly. We need to reset the stats in both cases. - if (resetStatistics || !this.getConf().getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + if (resetStatistics || !this.getConf().getBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER)) { LOG.debug("Clear table column statistics and set basic statistics to false for " + tbl.getCompleteName()); StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE); } @@ -3680,7 +3680,7 @@ public Partition getPartition(Table tbl, Map partSpec, for (FieldSchema field : tbl.getPartCols()) { String val = partSpec.get(field.getName()); // enable dynamic partitioning - if ((val == null && !HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) + if ((val == null && !HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING)) || (val != null && val.length() == 0)) { throw new HiveException("get partition: Value for key " + field.getName() + " is null or empty"); @@ -4076,7 +4076,7 @@ public List getPartitionNames(Table tbl, ExprNodeGenericFuncDesc expr, S exprBytes = SerializationUtilities.serializeObjectWithTypeInformation(expr); } try { - String defaultPartitionName = HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = HiveConf.getVar(conf, ConfVars.DEFAULT_PARTITION_NAME); PartitionsByExprRequest req = new PartitionsByExprRequest(tbl.getDbName(), tbl.getTableName(), ByteBuffer.wrap(exprBytes)); if (defaultPartitionName != null) { @@ -4104,49 +4104,20 @@ public List getPartitionNames(Table tbl, ExprNodeGenericFuncDesc expr, S } /** - * get all the partitions that the table has + * get all the partitions that the table has along with auth info * * @param tbl * object for which partition is needed - * @return list of partition objects + * @return list of partition objects along with auth info */ public List getPartitions(Table tbl) throws HiveException { PerfLogger perfLogger = SessionState.getPerfLogger(); perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS); - try { - if (tbl.isPartitioned()) { - List tParts; - try { - GetPartitionsPsWithAuthRequest req = new GetPartitionsPsWithAuthRequest(); - req.setTblName(tbl.getTableName()); - req.setDbName(tbl.getDbName()); - req.setUserName(getUserName()); - req.setMaxParts((short) -1); - req.setGroupNames(getGroupNames()); - if (AcidUtils.isTransactionalTable(tbl)) { - ValidWriteIdList validWriteIdList = getValidWriteIdList(tbl.getDbName(), tbl.getTableName()); - req.setValidWriteIdList(validWriteIdList != null ? 
validWriteIdList.toString() : null); - req.setId(tbl.getTTable().getId()); - } - GetPartitionsPsWithAuthResponse res = getMSC().listPartitionsWithAuthInfoRequest(req); - tParts = res.getPartitions(); - - } catch (NoSuchObjectException nsoe) { - return Lists.newArrayList(); - } catch (Exception e) { - LOG.error("Failed getPartitions", e); - throw new HiveException(e); - } - List parts = new ArrayList<>(tParts.size()); - for (org.apache.hadoop.hive.metastore.api.Partition tpart : tParts) { - parts.add(new Partition(tbl, tpart)); - } - - return parts; - } else { - return Collections.singletonList(new Partition(tbl)); - } + int batchSize = MetastoreConf.getIntVar(Hive.get().getConf(), MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX); + return new ArrayList<>(getAllPartitionsInBatches(tbl, batchSize, DEFAULT_BATCH_DECAYING_FACTOR, MetastoreConf + .getIntVar(Hive.get().getConf(), MetastoreConf.ConfVars.GETPARTITIONS_BATCH_MAX_RETRIES), + null, true, getUserName(), getGroupNames())); } finally { perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS, "HS2-cache"); } @@ -4186,14 +4157,33 @@ public Set getAllPartitionsOf(Table tbl) throws HiveException { Hive.get().getConf(), MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX); if (batchSize > 0) { return getAllPartitionsInBatches(tbl, batchSize, DEFAULT_BATCH_DECAYING_FACTOR, MetastoreConf.getIntVar( - Hive.get().getConf(), MetastoreConf.ConfVars.GETPARTITIONS_BATCH_MAX_RETRIES)); + Hive.get().getConf(), MetastoreConf.ConfVars.GETPARTITIONS_BATCH_MAX_RETRIES), null, false); } else { return getAllPartitions(tbl); } } public Set getAllPartitionsInBatches(Table tbl, int batchSize, int decayingFactor, - int maxRetries) throws HiveException { + int maxRetries, Map partialPartitionSpec, boolean isAuthRequired) throws HiveException { + return getAllPartitionsInBatches(tbl, batchSize, decayingFactor, maxRetries, partialPartitionSpec, isAuthRequired, + null, null); + } + + /** + * Main method which fetches the partitions in batches + * @param tbl table for which partitions are needed + * @param batchSize Number of partitions to be fetched in one batched call + * @param decayingFactor the value by which batchSize decays in the next retry in case it faces an exception + * @param maxRetries Number of retries allowed for this operation + * @param partialPartitionSpec partial partition spec for the table + * @param isAuthRequired If auth information is required along with partitions + * @param userName name of the calling user + * @param groupNames groups of the calling user + * @return list of partition objects + */ + public Set getAllPartitionsInBatches(Table tbl, int batchSize, int decayingFactor, + int maxRetries, Map partialPartitionSpec, boolean isAuthRequired, + String userName, List groupNames) throws HiveException { if (!tbl.isPartitioned()) { return Sets.newHashSet(new Partition(tbl)); } @@ -4202,13 +4192,11 @@ public Set getAllPartitionsInBatches(Table tbl, int batchSize, int de .ExponentiallyDecayingBatchWork(batchSize, decayingFactor, maxRetries) { @Override public Void execute(int size) throws HiveException { - try { - result.clear(); - new PartitionIterable(Hive.get(), tbl, null, size).forEach(result::add); - return null; - } catch (HiveException e) { - throw e; - } + result.clear(); + PartitionIterable partitionIterable = new PartitionIterable(Hive.get(), tbl, partialPartitionSpec, size, + isAuthRequired, userName, groupNames); + partitionIterable.forEach(result::add); + return null; + } }; try { @@ -4219,6 +4207,25 @@ public Void execute(int size) throws 
HiveException { return result; } + public List getPartitions(Table tbl, Map partialPartSpec, + short limit) throws HiveException { + PerfLogger perfLogger = SessionState.getPerfLogger(); + perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_2); + try { + // TODO: Implement Batching when limit is >=0 + if (limit >= 0) { + return getPartitionsWithAuth(tbl, partialPartSpec, limit); + } else { + int batchSize = MetastoreConf.getIntVar(Hive.get().getConf(), MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX); + return new ArrayList<>(getAllPartitionsInBatches(tbl, batchSize, DEFAULT_BATCH_DECAYING_FACTOR, + MetastoreConf.getIntVar(Hive.get().getConf(), MetastoreConf.ConfVars.GETPARTITIONS_BATCH_MAX_RETRIES), + partialPartSpec, true, getUserName(), getGroupNames())); + } + } finally { + perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_2, "HS2-cache"); + } + } + /** * get all the partitions of the table that matches the given partial * specification. partition columns whose value is can be anything should be @@ -4230,36 +4237,30 @@ public Void execute(int size) throws HiveException { * @return list of partition objects * @throws HiveException */ - public List getPartitions(Table tbl, Map partialPartSpec, - short limit) - throws HiveException { - PerfLogger perfLogger = SessionState.getPerfLogger(); - perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_2); - try { - if (!tbl.isPartitioned()) { - throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName()); - } - - List partialPvals = MetaStoreUtils.getPvals(tbl.getPartCols(), partialPartSpec); + private List getPartitionsWithAuth(Table tbl, Map partialPartSpec, + short limit) + throws HiveException { + if (!tbl.isPartitioned()) { + throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName()); + } - List partitions = null; - try { - String userName = getUserName(); - partitions = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(), tbl.getTableName(), - partialPvals, limit, userName, getGroupNames()); - } catch (Exception e) { - throw new HiveException(e); - } + List partialPvals = MetaStoreUtils.getPvals(tbl.getPartCols(), partialPartSpec); - List qlPartitions = new ArrayList(); - for (org.apache.hadoop.hive.metastore.api.Partition p : partitions) { - qlPartitions.add(new Partition(tbl, p)); - } + List partitions = null; + try { + String userName = getUserName(); + partitions = getMSC().listPartitionsWithAuthInfo(tbl.getDbName(), tbl.getTableName(), + partialPvals, limit, userName, getGroupNames()); + } catch (Exception e) { + throw new HiveException(e); + } - return qlPartitions; - } finally { - perfLogger.perfLogEnd(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_2, "HS2-cache"); + List qlPartitions = new ArrayList(); + for (org.apache.hadoop.hive.metastore.api.Partition p : partitions) { + qlPartitions.add(new Partition(tbl, p)); } + + return qlPartitions; } /** @@ -4385,6 +4386,37 @@ public List getPartitionsByNames(Table tbl, List partNames, b return partitions; } + public List getPartitionsAuthByNames(Table tbl, List partNames, String userName, + List groupNames) throws HiveException { + if (!tbl.isPartitioned()) { + throw new HiveException(ErrorMsg.TABLE_NOT_PARTITIONED, tbl.getTableName()); + } + GetPartitionsPsWithAuthRequest req = new GetPartitionsPsWithAuthRequest(); + req.setTblName(tbl.getTableName()); + req.setDbName(tbl.getDbName()); + req.setUserName(userName); + req.setGroupNames(groupNames); + req.setPartNames(partNames); + if (AcidUtils.isTransactionalTable(tbl)) { + 
ValidWriteIdList validWriteIdList = getValidWriteIdList(tbl.getDbName(), tbl.getTableName()); + req.setValidWriteIdList(validWriteIdList != null ? validWriteIdList.toString() : null); + req.setId(tbl.getTTable().getId()); + } + + List tParts; + try { + GetPartitionsPsWithAuthResponse res = getMSC().listPartitionsWithAuthInfoRequest(req); + tParts = res.getPartitions(); + } catch (Exception e) { + throw new HiveException(e); + } + List parts = new ArrayList<>(tParts.size()); + for (org.apache.hadoop.hive.metastore.api.Partition tpart : tParts) { + parts.add(new Partition(tbl, tpart)); + } + return parts; + } + /** * Get a list of Partitions by filter. * @param tbl The table containing the partitions. @@ -4491,7 +4523,7 @@ public boolean getPartitionsByExpr(Table tbl, ExprNodeDesc expr, HiveConf conf, perfLogger.perfLogBegin(CLASS_NAME, PerfLogger.HIVE_GET_PARTITIONS_BY_EXPR); try { Preconditions.checkNotNull(partitions); - String defaultPartitionName = HiveConf.getVar(conf, ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = HiveConf.getVar(conf, ConfVars.DEFAULT_PARTITION_NAME); if (tbl.getStorageHandler() != null && tbl.getStorageHandler().alwaysUnpartitioned()) { partitions.addAll(tbl.getStorageHandler().getPartitionsByExpr(tbl, expr)); return false; @@ -5542,6 +5574,13 @@ private void replaceFiles(Path tablePath, Path srcf, Path destf, Path oldPath, H throw new HiveException("Getting globStatus " + srcf.toString(), e); } + // For insert/load overwrite cases, where external.table.purge is disabled for the table, there may be stale + // partitions present in the table location after Alter table drop partition operation. In such cases, oldPath will be + // null, since those partitions will not be present in metastore. Added below check to clean up those stale partitions. + if (oldPath == null && isInsertOverwrite) { + deleteOldPathForReplace(destf, destf, conf, purge, deletePathFilter, isNeedRecycle); + } + // the extra check is required to make ALTER TABLE ... CONCATENATE work if (oldPath != null && (srcs != null || isInsertOverwrite)) { deleteOldPathForReplace(destf, oldPath, conf, purge, deletePathFilter, isNeedRecycle); @@ -5661,7 +5700,7 @@ private void deleteOldPathForReplace(Path destPath, Path oldPath, HiveConf conf, public void cleanUpOneDirectoryForReplace(Path path, FileSystem fs, PathFilter pathFilter, HiveConf conf, boolean purge, boolean isNeedRecycle) throws IOException, HiveException { - if (isNeedRecycle && conf.getBoolVar(HiveConf.ConfVars.REPLCMENABLED)) { + if (isNeedRecycle && conf.getBoolVar(HiveConf.ConfVars.REPL_CM_ENABLED)) { recycleDirToCmPath(path, purge); } if (!fs.exists(path)) { @@ -5871,7 +5910,7 @@ public synchronized IMetaStoreClient getMSC( } throw ex; } - String metaStoreUris = conf.getVar(HiveConf.ConfVars.METASTOREURIS); + String metaStoreUris = conf.getVar(HiveConf.ConfVars.METASTORE_URIS); if (!org.apache.commons.lang3.StringUtils.isEmpty(metaStoreUris)) { // get a synchronized wrapper if the meta store is remote. 
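The Hive.java hunks above replace the single-shot listPartitionsWithAuthInfo call with batched, optionally auth-aware fetching built on ExponentiallyDecayingBatchWork and PartitionIterable. A minimal usage sketch of the new getAllPartitionsInBatches overload follows (not part of the patch); it assumes an initialized SessionState/HiveConf and a reachable metastore, and the table, user, group, and decaying-factor values are placeholders.

    // Minimal sketch, assuming the signatures shown in the hunks above; throws HiveException.
    Hive db = Hive.get();
    Table tbl = db.getTable("default", "web_logs");   // placeholder partitioned table
    int batchSize = MetastoreConf.getIntVar(db.getConf(), MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX);
    int maxRetries = MetastoreConf.getIntVar(db.getConf(), MetastoreConf.ConfVars.GETPARTITIONS_BATCH_MAX_RETRIES);
    Set<Partition> partitions = db.getAllPartitionsInBatches(
        tbl,
        batchSize,                      // partitions requested per metastore call
        2,                              // decaying factor: batch size shrinks by this factor after a failed call
        maxRetries,                     // retries before the whole operation fails
        null,                           // no partial partition spec -> all partitions
        true,                           // isAuthRequired -> use listPartitionsWithAuthInfoRequest
        "analyst",                      // calling user (placeholder)
        Arrays.asList("analysts"));     // calling user's groups (placeholder)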
metaStoreClient = HiveMetaStoreClient.newSynchronizedClient(metaStoreClient); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java index ca11fcccffaa..9c5bdfe18af9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java @@ -83,9 +83,7 @@ import com.google.common.collect.ImmutableList; import static java.util.stream.Collectors.toList; -import static org.apache.commons.lang3.StringUtils.isBlank; -import static org.apache.hadoop.hive.ql.metadata.HiveRelOptMaterialization.RewriteAlgorithm.ALL; -import static org.apache.hadoop.hive.ql.metadata.HiveRelOptMaterialization.RewriteAlgorithm.TEXT; +import static org.apache.hadoop.hive.ql.metadata.RewriteAlgorithm.ALL; /** * Registry for materialized views. The goal of this cache is to avoid parsing and creating @@ -236,9 +234,7 @@ public HiveRelOptMaterialization createMaterialization(HiveConf conf, Table mate } return new HiveRelOptMaterialization(viewScan, plan.getPlan(), - null, viewScan.getTable().getQualifiedName(), - isBlank(plan.getInvalidAutomaticRewritingMaterializationReason()) ? - EnumSet.allOf(HiveRelOptMaterialization.RewriteAlgorithm.class) : EnumSet.of(TEXT), + null, viewScan.getTable().getQualifiedName(), plan.getSupportedRewriteAlgorithms(), determineIncrementalRebuildMode(plan.getPlan()), plan.getAst()); } @@ -273,7 +269,7 @@ public void createMaterializedView(HiveConf conf, Table materializedViewTable) { } HiveRelOptMaterialization materialization = createMaterialization(conf, materializedViewTable); - if (materialization == null) { + if (materialization == null || materialization.getScope().isEmpty()) { return; } @@ -348,7 +344,7 @@ public void dropMaterializedView(String dbName, String tableName) { */ List getRewritingMaterializedViews() { return materializedViewsCache.values().stream() - .filter(materialization -> materialization.getScope().contains(HiveRelOptMaterialization.RewriteAlgorithm.CALCITE)) + .filter(materialization -> materialization.getScope().contains(RewriteAlgorithm.CALCITE)) .collect(toList()); } @@ -358,7 +354,7 @@ List getRewritingMaterializedViews() { * @return the collection of materialized views, or the empty collection if none */ public HiveRelOptMaterialization getRewritingMaterializedView(String dbName, String viewName, - EnumSet scope) { + EnumSet scope) { HiveRelOptMaterialization materialization = materializedViewsCache.get(dbName, viewName); if (materialization == null) { return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreClientWithLocalCache.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreClientWithLocalCache.java index 625dbaeaf3eb..750d674aa4db 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreClientWithLocalCache.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMetaStoreClientWithLocalCache.java @@ -61,8 +61,6 @@ import org.apache.hadoop.hive.metastore.api.TableValidWriteIds; import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest; import org.apache.hadoop.hive.metastore.api.UniqueConstraintsResponse; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator; import 
org.apache.hadoop.hive.ql.util.IncrementalObjectSizeEstimator.ObjectEstimator; @@ -99,8 +97,8 @@ public static synchronized void init(Configuration conf) { // init cache only once if (!INITIALIZED.get()) { LOG.info("Initializing local cache in HiveMetaStoreClient..."); - maxSize = MetastoreConf.getSizeVar(conf, MetastoreConf.ConfVars.MSC_CACHE_MAX_SIZE); - recordStats = MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.MSC_CACHE_RECORD_STATS); + maxSize = HiveConf.getSizeVar(conf, HiveConf.ConfVars.MSC_CACHE_MAX_SIZE); + recordStats = HiveConf.getBoolVar(conf, HiveConf.ConfVars.MSC_CACHE_RECORD_STATS); initSizeEstimator(); initCache(); LOG.info("Local cache initialized in HiveMetaStoreClient: {}", mscLocalCache); @@ -520,7 +518,7 @@ private boolean isCacheEnabledAndInitialized() { protected String getQueryId() { try { - return Hive.get().getConf().get(HiveConf.ConfVars.HIVEQUERYID.varname); + return Hive.get().getConf().get(HiveConf.ConfVars.HIVE_QUERY_ID.varname); } catch (HiveException e) { LOG.error("Error getting query id. Query level and Global HMS caching will be disabled", e); return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveRelOptMaterialization.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveRelOptMaterialization.java index b4a20cea0fb1..04548bfb801a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveRelOptMaterialization.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveRelOptMaterialization.java @@ -28,7 +28,7 @@ import java.util.EnumSet; import java.util.List; -import java.util.function.Predicate; +import java.util.Set; import static org.apache.commons.collections.CollectionUtils.intersection; @@ -37,27 +37,6 @@ */ public class HiveRelOptMaterialization extends RelOptMaterialization { - /** - * Enumeration of Materialized view query rewrite algorithms. - */ - public enum RewriteAlgorithm { - /** - * Query sql text is compared to stored materialized view definition sql texts. - */ - TEXT, - /** - * Use rewriting algorithm provided by Calcite. 
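Regarding the registry hunks above: a materialization now advertises the set of rewrite algorithms its plan supports, an empty set keeps the view out of the registry, and Calcite-based rewriting only considers views whose scope contains CALCITE. The sketch below is illustrative only and leans on the RewriteAlgorithm enum introduced later in this patch; registry, conf, and mvTable are assumed to exist in the surrounding code.

    // Illustrative only; mirrors the conditions shown in the hunks above.
    HiveRelOptMaterialization m = registry.createMaterialization(conf, mvTable);
    if (m == null || m.getScope().isEmpty()) {
      return;   // the view supports no rewrite algorithm, so it is not registered
    }
    boolean calciteRewrite = m.getScope().contains(RewriteAlgorithm.CALCITE);           // eligible for Calcite rewriting
    boolean textOnly = !calciteRewrite && m.getScope().contains(RewriteAlgorithm.TEXT); // sql-text matching only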
- */ - CALCITE; - - public static final EnumSet ALL = EnumSet.allOf(RewriteAlgorithm.class); - - public static final Predicate> ANY = - rewriteAlgorithms -> true; - public static final Predicate> NON_CALCITE = - rewriteAlgorithms -> !rewriteAlgorithms.contains(HiveRelOptMaterialization.RewriteAlgorithm.CALCITE); - } - public enum IncrementalRebuildMode { AVAILABLE, INSERT_ONLY, @@ -65,7 +44,7 @@ public enum IncrementalRebuildMode { UNKNOWN } - private final EnumSet scope; + private final Set scope; private final boolean sourceTablesUpdateDeleteModified; private final boolean sourceTablesCompacted; private final IncrementalRebuildMode rebuildMode; @@ -73,13 +52,13 @@ public enum IncrementalRebuildMode { public HiveRelOptMaterialization(RelNode tableRel, RelNode queryRel, RelOptTable starRelOptTable, List qualifiedTableName, - EnumSet scope, IncrementalRebuildMode rebuildMode, ASTNode ast) { + Set scope, IncrementalRebuildMode rebuildMode, ASTNode ast) { this(tableRel, queryRel, starRelOptTable, qualifiedTableName, scope, false, false, rebuildMode, ast); } private HiveRelOptMaterialization(RelNode tableRel, RelNode queryRel, RelOptTable starRelOptTable, List qualifiedTableName, - EnumSet scope, + Set scope, boolean sourceTablesUpdateDeleteModified, boolean sourceTablesCompacted, IncrementalRebuildMode rebuildMode, ASTNode ast) { super(tableRel, queryRel, starRelOptTable, qualifiedTableName); this.scope = scope; @@ -89,7 +68,7 @@ private HiveRelOptMaterialization(RelNode tableRel, RelNode queryRel, this.ast = ast; } - public EnumSet getScope() { + public Set getScope() { return scope; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java index 9fe61c0bdc32..e9a0d139e90a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveStorageHandler.java @@ -24,7 +24,6 @@ import java.net.URISyntaxException; import java.util.Collections; import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.common.classification.InterfaceAudience; import org.apache.hadoop.hive.common.classification.InterfaceStability; import org.apache.hadoop.hive.common.type.SnapshotContext; @@ -43,6 +42,7 @@ import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.ddl.table.AbstractAlterTableDesc; import org.apache.hadoop.hive.ql.ddl.table.AlterTableType; +import org.apache.hadoop.hive.ql.ddl.table.create.CreateTableDesc; import org.apache.hadoop.hive.ql.ddl.table.create.like.CreateTableLikeDesc; import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.hooks.WriteEntity; @@ -378,6 +378,13 @@ default void setTableParametersForCTLT(org.apache.hadoop.hive.ql.metadata.Table Map origParams) { } + /** + * Sets the table's physical location at create table as select (CTAS). + * Some storage handlers require specifying the location of tables, while others generate it internally. 
+ */ + default void setTableLocationForCTAS(CreateTableDesc desc, String location) { + } + /** * Extract the native properties of the table which aren't stored in the HMS * @param table the table diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java index d1d197ebece3..ae8849b8528d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveUtils.java @@ -468,4 +468,12 @@ public static Boolean isTableTag(String refName) { Matcher ref = TAG.matcher(refName); return ref.matches(); } + + public static String getLowerCaseTableName(String refName) { + String[] refParts = refName.split("\\."); + if (refParts.length == 3 && SNAPSHOT_REF.matcher(refParts[2]).matches()) { + return (refParts[0] + "." + refParts[1]).toLowerCase() + "." + refParts[2]; + } + return refName.toLowerCase(); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/MaterializationValidationResult.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/MaterializationValidationResult.java new file mode 100644 index 000000000000..3f3372e9c6df --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/MaterializationValidationResult.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
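The new HiveUtils.getLowerCaseTableName above lower-cases a dotted table name, but for three-part names whose last segment matches the snapshot-ref pattern it preserves the case of that ref segment. A small worked example follows; the exact SNAPSHOT_REF pattern lives elsewhere in HiveUtils, so treating "branch_Dev" as a matching ref is an assumption for illustration.

    // Assumes "branch_Dev" matches HiveUtils' SNAPSHOT_REF pattern (illustrative only).
    HiveUtils.getLowerCaseTableName("Sales.Orders.branch_Dev");  // -> "sales.orders.branch_Dev"
    HiveUtils.getLowerCaseTableName("Sales.Orders");             // -> "sales.orders" (fully lower-cased)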
+ */ + +package org.apache.hadoop.hive.ql.metadata; + +import java.util.EnumSet; +import java.util.Set; + +public class MaterializationValidationResult { + private final EnumSet supportedRewriteAlgorithms; + private final String errorMessage; + + public MaterializationValidationResult( + EnumSet supportedRewriteAlgorithms, String errorMessage) { + this.supportedRewriteAlgorithms = supportedRewriteAlgorithms; + this.errorMessage = errorMessage; + } + + public Set getSupportedRewriteAlgorithms() { + return supportedRewriteAlgorithms; + } + + public String getErrorMessage() { + return errorMessage; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java index 6b34a2b69729..884aa2016279 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java @@ -166,7 +166,7 @@ protected void initialize(Table table, return; } - if (table.isPartitioned()) { + if (table.isPartitioned() && tPartition.isSetSd()) { try { if (tPartition.getSd().getLocation() == null) { // set default if location is not set and this is a physical diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java index e31f9ef2d15c..dabfef014ce7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/PartitionIterable.java @@ -99,7 +99,11 @@ private void getNextBatch() { batchCounter++; } try { - batchIter = db.getPartitionsByNames(table, nameBatch, getColStats).iterator(); + if (isAuthRequired) { + batchIter = db.getPartitionsAuthByNames(table, nameBatch, userName, groupNames).iterator(); + } else { + batchIter = db.getPartitionsByNames(table, nameBatch, getColStats).iterator(); + } } catch (HiveException e) { throw new RuntimeException(e); } @@ -130,6 +134,9 @@ enum Type { private List partitionNames = null; private int batchSize; private boolean getColStats = false; + private boolean isAuthRequired = false; + private String userName; + private List groupNames; /** * Dummy constructor, which simply acts as an iterator on an already-present @@ -150,18 +157,37 @@ public PartitionIterable(Hive db, Table table, Map partialPartit this(db, table, partialPartitionSpec, batchSize, false); } + public PartitionIterable(Hive db, Table table, Map partialPartitionSpec, + int batchSize, boolean isAuthRequired, String userName, + List groupNames) throws HiveException { + this(db, table, partialPartitionSpec, batchSize, false, isAuthRequired, userName, groupNames); + } + + /** * Primary constructor that fetches all partitions in a given table, given * a Hive object and a table object, and a partial partition spec. */ public PartitionIterable(Hive db, Table table, Map partialPartitionSpec, int batchSize, boolean getColStats) throws HiveException { + this(db, table, partialPartitionSpec, batchSize, getColStats, false, null, null); + } + + private PartitionIterable(Hive db, Table table, Map partialPartitionSpec, + int batchSize, boolean getColStats, boolean isAuthRequired, String userName, + List groupNames) throws HiveException { + if (batchSize < 1) { + throw new HiveException("Invalid batch size for partition iterable. 
Please use a batch size greater than 0"); + } this.currType = Type.LAZY_FETCH_PARTITIONS; this.db = db; this.table = table; this.partialPartitionSpec = partialPartitionSpec; this.batchSize = batchSize; this.getColStats = getColStats; + this.isAuthRequired = isAuthRequired; + this.userName = userName; + this.groupNames = groupNames; if (this.partialPartitionSpec == null){ partitionNames = db.getPartitionNames( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/RewriteAlgorithm.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/RewriteAlgorithm.java new file mode 100644 index 000000000000..308df7299373 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/RewriteAlgorithm.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.metadata; + +import java.util.EnumSet; +import java.util.Set; +import java.util.function.Predicate; + +/** + * Enumeration of Materialized view query rewrite algorithms. + */ +public enum RewriteAlgorithm { + /** + * Query sql text is compared to stored materialized view definition sql texts. + */ + TEXT, + /** + * Use rewriting algorithm provided by Calcite. 
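As a companion to the earlier Hive.getAllPartitionsInBatches sketch, the PartitionIterable changes a few hunks above add auth-aware lazy iteration: partition names are listed once, then Partition objects are materialized in batches as the iterator advances, and the new constructors reject a batch size below 1. A short sketch under those assumptions (tbl, user, and group values are placeholders; the constructor throws HiveException):

    // Sketch of the auth-aware constructor shown above; iterates without holding all partitions in memory.
    PartitionIterable parts = new PartitionIterable(Hive.get(), tbl,
        null,                         // partialPartitionSpec: null -> iterate every partition
        300,                          // batchSize: must be >= 1, otherwise a HiveException is thrown
        true,                         // isAuthRequired
        "analyst",                    // userName (placeholder)
        Arrays.asList("analysts"));   // groupNames (placeholder)
    for (Partition p : parts) {
      // process one partition at a time
    }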
+ */ + CALCITE; + + public static final EnumSet ALL = EnumSet.allOf(RewriteAlgorithm.class); + + public static final Predicate> ANY = + rewriteAlgorithms -> true; + public static final Predicate> NON_CALCITE = + rewriteAlgorithms -> !rewriteAlgorithms.contains(CALCITE); +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index 3bb93ed60d1c..ce725a5cdb3e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -823,7 +823,7 @@ private void dropTempTable(org.apache.hadoop.hive.metastore.api.Table table, boo if (pathStr != null) { try { tablePath = new Path(table.getSd().getLocation()); - if (!getWh().isWritable(tablePath.getParent())) { + if (deleteData && !isExternalTable(table) && !getWh().isWritable(tablePath.getParent())) { throw new MetaException("Table metadata not deleted since " + tablePath.getParent() + " is not writable by " + SecurityUtils.getUser()); } @@ -1446,7 +1446,7 @@ public List dropPartitions(String catName, String dbName, String tblN List result = new ArrayList<>(); for (Pair pair : partExprs) { byte[] expr = pair.getRight(); - String filter = generateJDOFilter(table, expr, conf.get(HiveConf.ConfVars.DEFAULTPARTITIONNAME.varname)); + String filter = generateJDOFilter(table, expr, conf.get(HiveConf.ConfVars.DEFAULT_PARTITION_NAME.varname)); List partitions = tt.listPartitionsByFilter(filter); for (Partition p : partitions) { Partition droppedPartition = tt.dropPartition(p.getValues()); @@ -1705,10 +1705,9 @@ private String generateJDOFilter(org.apache.hadoop.hive.metastore.api.Table tabl assert table != null; ExpressionTree.FilterBuilder filterBuilder = new ExpressionTree.FilterBuilder(true); Map params = new HashMap<>(); - exprTree.generateJDOFilterFragment(conf, params, filterBuilder, table.getPartitionKeys()); + exprTree.accept(new ExpressionTree.JDOFilterGenerator(conf, + table.getPartitionKeys(), filterBuilder, params)); StringBuilder stringBuilder = new StringBuilder(filterBuilder.getFilter()); - // replace leading && - stringBuilder.replace(0, 4, ""); params.entrySet().stream().forEach(e -> { int index = stringBuilder.indexOf(e.getKey()); stringBuilder.replace(index, index + e.getKey().length(), "\"" + e.getValue().toString() + "\""); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java index 8ffd41c49f94..e5109b34ef7e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java @@ -112,7 +112,7 @@ public static List getRegistry(Configuration conf) { ArrayList l = new ArrayList(); l.add(BLOCKOFFSET); l.add(FILENAME); - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEROWOFFSET)) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_ROW_OFFSET)) { l.add(ROWOFFSET); } l.add(ROWID); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java index 5d4e6cd8c8ce..bab07f179b1c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/BucketMapjoinProc.java @@ -49,7 +49,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // Throw an error if the 
user asked for bucketed mapjoin to be enforced and // bucketed mapjoin cannot be performed - if (!convert && conf.getBoolVar(HiveConf.ConfVars.HIVEENFORCEBUCKETMAPJOIN)) { + if (!convert && conf.getBoolVar(HiveConf.ConfVars.HIVE_ENFORCE_BUCKET_MAPJOIN)) { throw new SemanticException(ErrorMsg.BUCKET_MAPJOIN_NOT_POSSIBLE.getMsg()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java index 98a80ab603ee..87b3820f6735 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConvertJoinMapJoin.java @@ -113,7 +113,7 @@ public class ConvertJoinMapJoin implements SemanticNodeProcessor { OptimizeTezProcContext context = (OptimizeTezProcContext) procCtx; - hashTableLoadFactor = context.conf.getFloatVar(ConfVars.HIVEHASHTABLELOADFACTOR); + hashTableLoadFactor = context.conf.getFloatVar(ConfVars.HIVE_HASHTABLE_LOAD_FACTOR); fastHashTableAvailable = context.conf.getBoolVar(ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED); JoinOperator joinOp = (JoinOperator) nd; @@ -133,7 +133,7 @@ public class ConvertJoinMapJoin implements SemanticNodeProcessor { TezBucketJoinProcCtx tezBucketJoinProcCtx = new TezBucketJoinProcCtx(context.conf); - boolean hiveConvertJoin = context.conf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN) & + boolean hiveConvertJoin = context.conf.getBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN) & !context.parseContext.getDisableMapJoin(); if (!hiveConvertJoin) { // we are just converting to a common merge join operator. The shuffle @@ -251,7 +251,7 @@ private boolean selectJoinForLlap(OptimizeTezProcContext context, JoinOperator j TezBucketJoinProcCtx tezBucketJoinProcCtx, LlapClusterStateForCompile llapInfo, MapJoinConversion mapJoinConversion, int numBuckets) throws SemanticException { - if (!context.conf.getBoolVar(HiveConf.ConfVars.HIVEDYNAMICPARTITIONHASHJOIN) + if (!context.conf.getBoolVar(HiveConf.ConfVars.HIVE_DYNAMIC_PARTITION_HASHJOIN) && numBuckets > 1) { // DPHJ is disabled, only attempt BMJ or mapjoin return convertJoinBucketMapJoin(joinOp, context, mapJoinConversion, tezBucketJoinProcCtx); @@ -406,7 +406,7 @@ private static long hashTableDataSizeAdjustment(long numRows, List - HiveConf.getIntVar(context.conf, HiveConf.ConfVars.XPRODSMALLTABLEROWSTHRESHOLD)) { + HiveConf.getIntVar(context.conf, HiveConf.ConfVars.XPROD_SMALL_TABLE_ROWS_THRESHOLD)) { // if any of smaller side is estimated to generate more than // threshold rows we would disable mapjoin return null; @@ -1328,7 +1328,7 @@ public MapJoinOperator convertJoinMapJoin(JoinOperator joinOp, OptimizeTezProcCo } MapJoinDesc mapJoinDesc = mapJoinOp.getConf(); mapJoinDesc.setHybridHashJoin(HiveConf.getBoolVar(context.conf, - HiveConf.ConfVars.HIVEUSEHYBRIDGRACEHASHJOIN)); + HiveConf.ConfVars.HIVE_USE_HYBRIDGRACE_HASHJOIN)); List joinExprs = mapJoinDesc.getKeys().values().iterator().next(); if (joinExprs.size() == 0) { // In case of cross join, we disable hybrid grace hash join mapJoinDesc.setHybridHashJoin(false); @@ -1585,8 +1585,8 @@ private boolean convertJoinDynamicPartitionedHashJoin(JoinOperator joinOp, Optim private void fallbackToReduceSideJoin(JoinOperator joinOp, OptimizeTezProcContext context) throws SemanticException { - if (context.conf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN) && - context.conf.getBoolVar(HiveConf.ConfVars.HIVEDYNAMICPARTITIONHASHJOIN)) { + if 
(context.conf.getBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN) && + context.conf.getBoolVar(HiveConf.ConfVars.HIVE_DYNAMIC_PARTITION_HASHJOIN)) { if (convertJoinDynamicPartitionedHashJoin(joinOp, context)) { return; } @@ -1617,7 +1617,7 @@ private void fallbackToMergeJoin(JoinOperator joinOp, OptimizeTezProcContext con private boolean checkNumberOfEntriesForHashTable(JoinOperator joinOp, int position, OptimizeTezProcContext context) { long max = HiveConf.getLongVar(context.parseContext.getConf(), - HiveConf.ConfVars.HIVECONVERTJOINMAXENTRIESHASHTABLE); + HiveConf.ConfVars.HIVE_CONVERT_JOIN_MAX_ENTRIES_HASHTABLE); if (max < 1) { // Max is disabled, we can safely return true return true; @@ -1652,7 +1652,7 @@ private boolean checkNumberOfEntriesForHashTable(JoinOperator joinOp, int positi private boolean checkShuffleSizeForLargeTable(JoinOperator joinOp, int position, OptimizeTezProcContext context) { long max = HiveConf.getLongVar(context.parseContext.getConf(), - HiveConf.ConfVars.HIVECONVERTJOINMAXSHUFFLESIZE); + HiveConf.ConfVars.HIVE_CONVERT_JOIN_MAX_SHUFFLE_SIZE); if (max < 1) { // Max is disabled, we can safely return false return false; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java index 32edacba7c3e..29e421136129 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/CountDistinctRewriteProc.java @@ -180,7 +180,7 @@ protected int checkCountDistinct(GroupByOperator mGby, ReduceSinkOperator rs, return -1; } // check if it is potential to trigger nullscan - if (pGraphContext.getConf().getBoolVar(HiveConf.ConfVars.HIVEMETADATAONLYQUERIES)) { + if (pGraphContext.getConf().getBoolVar(HiveConf.ConfVars.HIVE_METADATA_ONLY_QUERIES)) { for (TableScanOperator tsOp : pGraphContext.getTopOps().values()) { List colIDs = tsOp.getNeededColumnIDs(); TableScanDesc desc = tsOp.getConf(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java index 85a420df8883..bc4bff81d6b8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/DynamicPartitionPruningOptimization.java @@ -492,16 +492,16 @@ private void generateEventOperatorPlan(DynamicListContext ctx, ParseContext pars // do a group by on the list to dedup float groupByMemoryUsage = - HiveConf.getFloatVar(parseContext.getConf(), HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + HiveConf.getFloatVar(parseContext.getConf(), HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); float memoryThreshold = HiveConf.getFloatVar(parseContext.getConf(), - HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); float minReductionHashAggr = HiveConf.getFloatVar(parseContext.getConf(), - ConfVars.HIVEMAPAGGRHASHMINREDUCTION); + ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION); float minReductionHashAggrLowerBound = HiveConf.getFloatVar(parseContext.getConf(), - ConfVars.HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND); + ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND); List groupByExprs = new ArrayList(); ExprNodeDesc groupByExpr = @@ -613,16 +613,16 @@ private boolean generateSemiJoinOperatorPlan(DynamicListContext ctx, ParseContex // do a group by to aggregate min,max and bloom filter. 
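Most of the remaining hunks in this and the following optimizer files are mechanical renames of HiveConf.ConfVars constants (HIVEMAPAGGRHASHMEMORY to HIVE_MAP_AGGR_HASH_MEMORY, and so on). As far as these hunks show, only the Java enum constant changes; the snippet below assumes the underlying configuration property string is unchanged, so existing hive-site.xml settings keep working.

    // Hedged illustration of the rename pattern: only the Java-side constant name differs.
    HiveConf conf = new HiveConf();
    // before: conf.getFloatVar(HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY);
    float mapAggrHashMemory = conf.getFloatVar(HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY);
    // Both constants are assumed to resolve to the same property key (e.g. hive.map.aggr.hash.percentmemory).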
float groupByMemoryUsage = - HiveConf.getFloatVar(parseContext.getConf(), HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + HiveConf.getFloatVar(parseContext.getConf(), HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); float memoryThreshold = HiveConf.getFloatVar(parseContext.getConf(), - HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); float minReductionHashAggr = HiveConf.getFloatVar(parseContext.getConf(), - ConfVars.HIVEMAPAGGRHASHMINREDUCTION); + ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION); float minReductionHashAggrLowerBound = HiveConf.getFloatVar(parseContext.getConf(), - ConfVars.HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND); + ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND); // Add min/max and bloom filter aggregations List aggFnOIs = new ArrayList(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java index 0eea69882a7a..3f1dd2145878 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/FixedBucketPruningOptimizer.java @@ -202,7 +202,7 @@ protected void generatePredicate(NodeProcessorCtx procCtx, BitSet bs = new BitSet(numBuckets); bs.clear(); PrimitiveObjectInspector bucketOI = (PrimitiveObjectInspector)bucketField.getFieldObjectInspector(); - PrimitiveObjectInspector constOI = PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(bucketOI.getPrimitiveCategory()); + PrimitiveObjectInspector constOI = PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(bucketOI.getTypeInfo()); // Fetch the bucketing version from table scan operator int bucketingVersion = top.getConf().getTableMetadata().getBucketingVersion(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java index 94dc5a5c26ec..525ecfbb13e7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java @@ -544,7 +544,7 @@ public static void setMapWork(MapWork plan, ParseContext parseCtx, Set fileSinkOp = OperatorFactory.get( parent.getCompilationOpContext(), desc, parent.getSchema()); @@ -1251,9 +1251,9 @@ public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, + " into " + finalName); } - boolean isBlockMerge = (conf.getBoolVar(ConfVars.HIVEMERGERCFILEBLOCKLEVEL) && + boolean isBlockMerge = (conf.getBoolVar(ConfVars.HIVE_MERGE_RCFILE_BLOCK_LEVEL) && fsInputDesc.getTableInfo().getInputFileFormatClass().equals(RCFileInputFormat.class)) || - (conf.getBoolVar(ConfVars.HIVEMERGEORCFILESTRIPELEVEL) && + (conf.getBoolVar(ConfVars.HIVE_MERGE_ORC_FILE_STRIPE_LEVEL) && fsInputDesc.getTableInfo().getInputFileFormatClass().equals(OrcInputFormat.class)); RowSchema inputRS = fsInput.getSchema(); @@ -1268,7 +1268,7 @@ public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, // Create a FileSink operator TableDesc ts = (TableDesc) fsInputDesc.getTableInfo().clone(); Path mergeDest = srcMmWriteId == null ? 
finalName : finalName.getParent(); - fsOutputDesc = new FileSinkDesc(mergeDest, ts, conf.getBoolVar(ConfVars.COMPRESSRESULT)); + fsOutputDesc = new FileSinkDesc(mergeDest, ts, conf.getBoolVar(ConfVars.COMPRESS_RESULT)); fsOutputDesc.setMmWriteId(srcMmWriteId); fsOutputDesc.setIsMerge(true); // Create and attach the filesink for the merge. @@ -1316,7 +1316,7 @@ public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, cplan = GenMapRedUtils.createMergeTask(fsInputDesc, finalName, dpCtx != null && dpCtx.getNumDPCols() > 0, fsInput.getCompilationOpContext()); if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) { - work = new TezWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID), conf); + work = new TezWork(conf.getVar(HiveConf.ConfVars.HIVE_QUERY_ID), conf); cplan.setName("File Merge"); ((TezWork) work).add(cplan); } else { @@ -1325,7 +1325,7 @@ public static void createMRWorkForMergingFiles(FileSinkOperator fsInput, } else { cplan = createMRWorkForMergingFiles(conf, tsMerge, fsInputDesc); if (conf.getVar(ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) { - work = new TezWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID), conf); + work = new TezWork(conf.getVar(HiveConf.ConfVars.HIVE_QUERY_ID), conf); cplan.setName("File Merge"); ((TezWork)work).add(cplan); } else { @@ -1894,7 +1894,7 @@ public static boolean isMergeRequired(List> mvTasks, HiveConf hco fsOp.getConf().isMmTable(), fsOp.getConf().isDirectInsert(), fsOp.getConf().getMoveTaskId(), fsOp.getConf().getAcidOperation()); // TODO: wtf?!! why is this in this method? This has nothing to do with anything. - if (isInsertTable && hconf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) + if (isInsertTable && hconf.getBoolVar(ConfVars.HIVE_STATS_AUTOGATHER) && !fsOp.getConf().isMaterialization()) { // mark the MapredWork and FileSinkOperator for gathering stats fsOp.getConf().setGatherStats(true); @@ -1910,7 +1910,7 @@ public static boolean isMergeRequired(List> mvTasks, HiveConf hco if (currTask.getWork() instanceof TezWork) { // tez blurs the boundary between map and reduce, thus it has it's own config - return hconf.getBoolVar(ConfVars.HIVEMERGETEZFILES); + return hconf.getBoolVar(ConfVars.HIVE_MERGE_TEZ_FILES); } return isMergeRequiredForMr(hconf, fsOp, currTask); } @@ -1918,12 +1918,12 @@ public static boolean isMergeRequired(List> mvTasks, HiveConf hco private static boolean isMergeRequiredForMr(HiveConf hconf, FileSinkOperator fsOp, Task currTask) { if (fsOp.getConf().isLinkedFileSink()) { - // If the user has HIVEMERGEMAPREDFILES set to false, the idea was the + // If the user has HIVE_MERGE_MAPRED_FILES set to false, the idea was the // number of reducers are few, so the number of files anyway are small. // However, with this optimization, we are increasing the number of files // possibly by a big margin. So, merge aggressively. 
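The isMergeRequired/isMergeRequiredForMr comments above explain when a small-file merge stage is added. A condensed paraphrase of that decision using the renamed ConfVars follows; it is a sketch of the logic in the hunks, not a drop-in replacement.

    // Condensed paraphrase of the merge decision shown above.
    static boolean mergeRequired(HiveConf hconf, boolean isTez, boolean isLinkedFileSink, boolean hasReduceWork) {
      if (isTez) {
        // Tez blurs the map/reduce boundary, so it has a single switch.
        return hconf.getBoolVar(HiveConf.ConfVars.HIVE_MERGE_TEZ_FILES);
      }
      if (isLinkedFileSink) {
        // Linked file sinks can multiply output files, so merge if either flag is on.
        return hconf.getBoolVar(HiveConf.ConfVars.HIVE_MERGE_MAPFILES)
            || hconf.getBoolVar(HiveConf.ConfVars.HIVE_MERGE_MAPRED_FILES);
      }
      return hasReduceWork
          ? hconf.getBoolVar(HiveConf.ConfVars.HIVE_MERGE_MAPRED_FILES)
          : hconf.getBoolVar(HiveConf.ConfVars.HIVE_MERGE_MAPFILES);
    }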
- return (hconf.getBoolVar(ConfVars.HIVEMERGEMAPFILES) || - hconf.getBoolVar(ConfVars.HIVEMERGEMAPREDFILES)); + return (hconf.getBoolVar(ConfVars.HIVE_MERGE_MAPFILES) || + hconf.getBoolVar(ConfVars.HIVE_MERGE_MAPRED_FILES)); } // There are separate configuration parameters to control whether to // merge for a map-only job @@ -1931,9 +1931,9 @@ private static boolean isMergeRequiredForMr(HiveConf hconf, if (currTask.getWork() instanceof MapredWork) { ReduceWork reduceWork = ((MapredWork) currTask.getWork()).getReduceWork(); boolean mergeMapOnly = - hconf.getBoolVar(ConfVars.HIVEMERGEMAPFILES) && reduceWork == null; + hconf.getBoolVar(ConfVars.HIVE_MERGE_MAPFILES) && reduceWork == null; boolean mergeMapRed = - hconf.getBoolVar(ConfVars.HIVEMERGEMAPREDFILES) && + hconf.getBoolVar(ConfVars.HIVE_MERGE_MAPRED_FILES) && reduceWork != null; if (mergeMapOnly || mergeMapRed) { return true; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java index f52d5652b608..ec2a6ccb818c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java @@ -83,7 +83,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { Map opRules = new LinkedHashMap(); HiveConf conf = pctx.getConf(); - if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { // process group-by pattern opRules.put(new RuleRegExp("R1", GroupByOperator.getOperatorName() + "%" + @@ -188,7 +188,7 @@ protected void processGroupBy(GroupByOptimizerContext ctx, if (!groupByOpDesc.isDistinct()) { removeReduceSink = true; } - else if (!HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + else if (!HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { // Optimize the query: select count(distinct keys) from T, where // T is bucketized and sorted by T // Partial aggregation can be done by the mappers in this scenario diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java index 4cae3b26a3fd..8b71ea65f283 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/LimitPushdownOptimizer.java @@ -251,10 +251,10 @@ private static class LimitPushdownContext implements NodeProcessorCtx { private final float threshold; public LimitPushdownContext(HiveConf conf) throws SemanticException { - threshold = conf.getFloatVar(HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE); + threshold = conf.getFloatVar(HiveConf.ConfVars.HIVE_LIMIT_PUSHDOWN_MEMORY_USAGE); if (threshold <= 0 || threshold >= 1) { throw new SemanticException("Invalid memory usage value " + threshold + - " for " + HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE); + " for " + HiveConf.ConfVars.HIVE_LIMIT_PUSHDOWN_MEMORY_USAGE); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java index e922ce477964..839db8f9bbc6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java @@ -413,7 +413,7 @@ private static boolean checkFullOuterMapJoinCompatible(HiveConf hiveConf, boolean 
isVectorizationMapJoinNativeEnabled = HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED); boolean isHybridHashJoin = HiveConf.getBoolVar(hiveConf, - HiveConf.ConfVars.HIVEUSEHYBRIDGRACEHASHJOIN); + HiveConf.ConfVars.HIVE_USE_HYBRIDGRACE_HASHJOIN); if (isVectorizationMapJoinNativeEnabled && isHybridHashJoin) { LOG.debug("FULL OUTER MapJoin not enabled: Native Vector MapJoin and Hybrid Grace not supported"); return false; @@ -469,12 +469,12 @@ public static boolean isFullOuterMapEnabled(HiveConf hiveConf, JoinOperator join final boolean isEnabled = HiveConf.getBoolVar( hiveConf, - HiveConf.ConfVars.HIVEMAPJOINFULLOUTER); + HiveConf.ConfVars.HIVE_MAPJOIN_FULL_OUTER); switch (mapJoinFullOuterOverride) { case NONE: { if (!isEnabled) { - LOG.debug("FULL OUTER MapJoin not enabled: {} is false", HiveConf.ConfVars.HIVEMAPJOINFULLOUTER.varname); + LOG.debug("FULL OUTER MapJoin not enabled: {} is false", HiveConf.ConfVars.HIVE_MAPJOIN_FULL_OUTER.varname); return false; } } @@ -483,18 +483,18 @@ public static boolean isFullOuterMapEnabled(HiveConf hiveConf, JoinOperator join if (LOG.isDebugEnabled()) { LOG.debug("FULL OUTER MapJoin not enabled: " + HiveConf.ConfVars.HIVE_TEST_MAPJOINFULLOUTER_OVERRIDE.varname + " is disable (" + - " " + HiveConf.ConfVars.HIVEMAPJOINFULLOUTER.varname + " is " + isEnabled + ")"); + " " + HiveConf.ConfVars.HIVE_MAPJOIN_FULL_OUTER.varname + " is " + isEnabled + ")"); } return false; case ENABLE: // Different parts of the code may rely on this being set... HiveConf.setBoolVar(hiveConf, - HiveConf.ConfVars.HIVEMAPJOINFULLOUTER, true); + HiveConf.ConfVars.HIVE_MAPJOIN_FULL_OUTER, true); if (LOG.isDebugEnabled()) { LOG.debug("FULL OUTER MapJoin is enabled: " + HiveConf.ConfVars.HIVE_TEST_MAPJOINFULLOUTER_OVERRIDE.varname + " is enable (" + - " " + HiveConf.ConfVars.HIVEMAPJOINFULLOUTER.varname + " is " + isEnabled + ")"); + " " + HiveConf.ConfVars.HIVE_MAPJOIN_FULL_OUTER.varname + " is " + isEnabled + ")"); } break; default: @@ -520,9 +520,9 @@ public static boolean isFullOuterMapEnabled(HiveConf hiveConf, JoinOperator join final boolean isOptimizedHashTableEnabled = HiveConf.getBoolVar( hiveConf, - HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE); + HiveConf.ConfVars.HIVE_MAPJOIN_USE_OPTIMIZED_TABLE); if (!isOptimizedHashTableEnabled) { - LOG.debug("FULL OUTER MapJoin not enabled: {} is false", HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE.varname); + LOG.debug("FULL OUTER MapJoin not enabled: {} is false", HiveConf.ConfVars.HIVE_MAPJOIN_USE_OPTIMIZED_TABLE.varname); return false; } @@ -652,8 +652,8 @@ public MapJoinOperator generateMapJoinOperator(ParseContext pctx, JoinOperator o int mapJoinPos) throws SemanticException { HiveConf hiveConf = pctx.getConf(); boolean noCheckOuterJoin = HiveConf.getBoolVar(hiveConf, - HiveConf.ConfVars.HIVEOPTSORTMERGEBUCKETMAPJOIN) - && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN); + HiveConf.ConfVars.HIVE_OPT_SORT_MERGE_BUCKET_MAPJOIN) + && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_BUCKET_MAPJOIN); MapJoinOperator mapJoinOp = convertMapJoin(pctx.getConf(), op, op.getConf().isLeftInputJoin(), op.getConf().getBaseSrc(), @@ -1306,7 +1306,8 @@ public static MapJoinDesc getMapJoinDesc(HiveConf hconf, List keyExprList = ExprNodeDescUtils.resolveJoinKeysAsRSColumns(mapEntry.getValue(), rsParent); if (keyExprList == null) { - throw new SemanticException("Error resolving join keys"); + LOG.warn("Error resolving join keys {} in {} {}", mapEntry.getValue(), rsParent, 
rsParent.getColumnExprMap()); + return null; } newKeyExprMap.put(pos, keyExprList); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java index 41bdf77f5fbb..3341be88fd10 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/Optimizer.java @@ -74,7 +74,7 @@ public void initialize(HiveConf hiveConf) { // Add the transformation that computes the lineage information. Set postExecHooks = Sets.newHashSet( Splitter.on(",").trimResults().omitEmptyStrings().split( - Strings.nullToEmpty(HiveConf.getVar(hiveConf, HiveConf.ConfVars.POSTEXECHOOKS)))); + Strings.nullToEmpty(HiveConf.getVar(hiveConf, HiveConf.ConfVars.POST_EXEC_HOOKS)))); if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_LINEAGE_INFO) || postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.PostExecutePrinter") || postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.LineageLogger") @@ -83,33 +83,33 @@ public void initialize(HiveConf hiveConf) { } // Try to transform OR predicates in Filter into simpler IN clauses first - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZER) && + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_POINT_LOOKUP_OPTIMIZER) && !pctx.getContext().isCboSucceeded()) { final int min = HiveConf.getIntVar(hiveConf, - HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZERMIN); + HiveConf.ConfVars.HIVE_POINT_LOOKUP_OPTIMIZER_MIN); transformations.add(new PointLookupOptimizer(min)); } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEPARTITIONCOLUMNSEPARATOR)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_PARTITION_COLUMN_SEPARATOR)) { transformations.add(new PartitionColumnsSeparator()); } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTPPD) && + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_PPD) && !pctx.getContext().isCboSucceeded()) { transformations.add(new PredicateTransitivePropagate()); - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCONSTANTPROPAGATION)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_CONSTANT_PROPAGATION)) { transformations.add(new ConstantPropagate()); } transformations.add(new SyntheticJoinPredicate()); transformations.add(new PredicatePushDown()); - } else if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTPPD) && + } else if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_PPD) && pctx.getContext().isCboSucceeded()) { transformations.add(new SyntheticJoinPredicate()); transformations.add(new SimplePredicatePushDown()); transformations.add(new RedundantDynamicPruningConditionsRemoval()); } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCONSTANTPROPAGATION) && + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_CONSTANT_PROPAGATION) && (!pctx.getContext().isCboSucceeded() || pctx.getContext().getOperation() == Context.Operation.MERGE)) { // We run constant propagation twice because after predicate pushdown, filter expressions // are combined and may become eligible for reduction (like is not null filter). 
@@ -121,26 +121,26 @@ public void initialize(HiveConf hiveConf) { transformations.add(new SortedDynPartitionTimeGranularityOptimizer()); - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTPPD)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_PPD)) { transformations.add(new PartitionPruner()); transformations.add(new PartitionConditionRemover()); - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTLISTBUCKETING)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_LIST_BUCKETING)) { /* Add list bucketing pruner. */ transformations.add(new ListBucketingPruner()); } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCONSTANTPROPAGATION) && + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_CONSTANT_PROPAGATION) && !pctx.getContext().isCboSucceeded()) { // PartitionPruner may create more folding opportunities, run ConstantPropagate again. transformations.add(new ConstantPropagate()); } } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTGROUPBY) || + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_GROUPBY) || HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_MAP_GROUPBY_SORT)) { transformations.add(new GroupByOptimizer()); } transformations.add(new ColumnPruner()); - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVECOUNTDISTINCTOPTIMIZER) + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_COUNT_DISTINCT_OPTIMIZER) && (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_IN_TEST) || isTezExecEngine)) { transformations.add(new CountDistinctRewriteProc()); } @@ -156,7 +156,7 @@ public void initialize(HiveConf hiveConf) { MapJoinProcessor mapJoinProcessor = new MapJoinProcessor(); transformations.add(mapJoinProcessor); - if ((HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTBUCKETMAPJOIN)) + if ((HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_BUCKET_MAPJOIN)) && !isTezExecEngine) { transformations.add(new BucketMapJoinOptimizer()); bucketMapJoinOptimizer = true; @@ -164,7 +164,7 @@ public void initialize(HiveConf hiveConf) { // If optimize hive.optimize.bucketmapjoin.sortedmerge is set, add both // BucketMapJoinOptimizer and SortedMergeBucketMapJoinOptimizer - if ((HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTSORTMERGEBUCKETMAPJOIN)) + if ((HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_SORT_MERGE_BUCKET_MAPJOIN)) && !isTezExecEngine) { if (!bucketMapJoinOptimizer) { // No need to add BucketMapJoinOptimizer twice @@ -173,20 +173,20 @@ public void initialize(HiveConf hiveConf) { transformations.add(new SortedMergeBucketMapJoinOptimizer()); } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTIMIZEBUCKETINGSORTING)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPTIMIZE_BUCKETING_SORTING)) { transformations.add(new BucketingSortingReduceSinkOptimizer()); } transformations.add(new UnionProcessor()); - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.NWAYJOINREORDER)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.N_WAY_JOIN_REORDER)) { transformations.add(new JoinReorder()); } if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.TEZ_OPTIMIZE_BUCKET_PRUNING) - && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTPPD) - && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTINDEXFILTER)) { + && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_PPD) + && HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_INDEX_FILTER)) { final boolean compatMode = HiveConf.getBoolVar(hiveConf, 
HiveConf.ConfVars.TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT); transformations.add(new FixedBucketPruningOptimizer(compatMode)); @@ -194,31 +194,31 @@ public void initialize(HiveConf hiveConf) { transformations.add(new BucketVersionPopulator()); - if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTREDUCEDEDUPLICATION) && + if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_REDUCE_DEDUPLICATION) && !isTezExecEngine) { transformations.add(new ReduceSinkDeDuplication()); } transformations.add(new NonBlockingOpDeDupProc()); - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEIDENTITYPROJECTREMOVER) + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_IDENTITY_PROJECT_REMOVER) && !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP)) { transformations.add(new IdentityProjectRemover()); } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVELIMITOPTENABLE)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_LIMIT_OPT_ENABLE)) { transformations.add(new GlobalLimitOptimizer()); } - if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTCORRELATION) && - !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEGROUPBYSKEW) && + if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_CORRELATION) && + !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_GROUPBY_SKEW) && !HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME) && !isTezExecEngine) { transformations.add(new CorrelationOptimizer()); } - if (HiveConf.getFloatVar(hiveConf, HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE) > 0) { + if (HiveConf.getFloatVar(hiveConf, HiveConf.ConfVars.HIVE_LIMIT_PUSHDOWN_MEMORY_USAGE) > 0) { transformations.add(new LimitPushdownOptimizer()); } if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPTIMIZE_LIMIT)) { transformations.add(new OrderlessLimitPushDownOptimizer()); } - if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES)) { + if(HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES)) { transformations.add(new StatsOptimizer()); } if (pctx.getContext().isExplainSkipExecution() && !isTezExecEngine) { @@ -226,11 +226,11 @@ public void initialize(HiveConf hiveConf) { transformations.add(new AnnotateWithOpTraits()); } - if (!HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVEFETCHTASKCONVERSION).equals("none")) { + if (!HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION).equals("none")) { transformations.add(new SimpleFetchOptimizer()); // must be called last } - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEFETCHTASKAGGR)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_FETCH_TASK_AGGR)) { transformations.add(new SimpleFetchAggregation()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SemiJoinReductionMerge.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SemiJoinReductionMerge.java index 1201418bcccc..d3764dcc2365 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SemiJoinReductionMerge.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SemiJoinReductionMerge.java @@ -399,10 +399,10 @@ private static GroupByOperator createGroupBy(SelectOperator selectOp, Operator stack, ReduceSinkOperator sink = (ReduceSinkOperator) nd; ReduceSinkDesc desc = sink.getConf(); - long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER); - int maxReducers = context.conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS); - int constantReducers = 
context.conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS); + long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTES_PER_REDUCER); + int maxReducers = context.conf.getIntVar(HiveConf.ConfVars.MAX_REDUCERS); + int constantReducers = context.conf.getIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS); if (context.visitedReduceSinks.contains(sink)) { // skip walking the children diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java index 67437aed3620..2f80bcfdb583 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SharedWorkOptimizer.java @@ -649,8 +649,9 @@ protected void mergeSchema(TableScanOperator discardableTsOp, TableScanOperator } private static boolean compatibleSchema(TableScanOperator tsOp1, TableScanOperator tsOp2) { - return tsOp1.getNeededColumns().equals(tsOp2.getNeededColumns()) - && tsOp1.getNeededColumnIDs().equals(tsOp2.getNeededColumnIDs()); + return Objects.equals(tsOp1.getNeededColumns(), tsOp2.getNeededColumns()) + && Objects.equals(tsOp1.getNeededColumnIDs(), tsOp2.getNeededColumnIDs()) + && Objects.equals(tsOp1.getConf().getVirtualCols(), tsOp2.getConf().getVirtualCols()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java index 83f6d9e7226a..fb1f6a1c7952 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SimpleFetchOptimizer.java @@ -131,7 +131,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { private FetchTask optimize(ParseContext pctx, String alias, TableScanOperator source) throws Exception { String mode = HiveConf.getVar( - pctx.getConf(), HiveConf.ConfVars.HIVEFETCHTASKCONVERSION); + pctx.getConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION); boolean aggressive = "more".equals(mode); final int limit = pctx.getQueryProperties().getOuterQueryLimit(); @@ -144,7 +144,7 @@ private FetchTask optimize(ParseContext pctx, String alias, TableScanOperator so FetchWork fetchWork = fetch.convertToWork(); FetchTask fetchTask = (FetchTask) TaskFactory.get(fetchWork); fetchTask.setCachingEnabled(HiveConf.getBoolVar(pctx.getConf(), - HiveConf.ConfVars.HIVEFETCHTASKCACHING)); + HiveConf.ConfVars.HIVE_FETCH_TASK_CACHING)); fetchWork.setSink(fetch.completed(pctx, fetchWork)); fetchWork.setSource(source); fetchWork.setLimit(limit); @@ -154,7 +154,7 @@ private FetchTask optimize(ParseContext pctx, String alias, TableScanOperator so } private boolean checkThreshold(FetchData data, int limit, ParseContext pctx) throws Exception { - boolean cachingEnabled = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVEFETCHTASKCACHING); + boolean cachingEnabled = HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_FETCH_TASK_CACHING); if (!cachingEnabled) { if (limit > 0) { if (data.hasOnlyPruningFilter()) { @@ -176,7 +176,7 @@ private boolean checkThreshold(FetchData data, int limit, ParseContext pctx) thr } // if caching is enabled we apply the treshold in all cases long threshold = HiveConf.getLongVar(pctx.getConf(), - HiveConf.ConfVars.HIVEFETCHTASKCONVERSIONTHRESHOLD); + HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION_THRESHOLD); if (threshold < 0) { return true; } @@ -209,7 +209,7 @@ private FetchData checkTree(boolean aggressive, ParseContext 
pctx, String alias, } boolean bypassFilter = false; - if (HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVEOPTPPD)) { + if (HiveConf.getBoolVar(pctx.getConf(), HiveConf.ConfVars.HIVE_OPT_PPD)) { ExprNodeDesc pruner = pctx.getOpToPartPruner().get(ts); if (PartitionPruner.onlyContainsPartnCols(table, pruner)) { bypassFilter = !pctx.getPrunedPartitions(alias, ts).hasUnknownPartitions(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java index b57ddd8e6c4e..cebb937c1209 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedDynPartitionOptimizer.java @@ -836,7 +836,7 @@ private ArrayList getPositionsToExprNodes(List pos, private boolean shouldDo(List partitionPos, Operator fsParent) { int threshold = HiveConf.getIntVar(this.parseCtx.getConf(), - HiveConf.ConfVars.HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD); + HiveConf.ConfVars.HIVE_OPT_SORT_DYNAMIC_PARTITION_THRESHOLD); long MAX_WRITERS = -1; switch (threshold) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java index cbfb749fc628..e8cec2fceefc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/SortedMergeBucketMapjoinProc.java @@ -55,7 +55,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // and sort merge bucketed mapjoin cannot be performed if (!convert && pGraphContext.getConf().getBoolVar( - HiveConf.ConfVars.HIVEENFORCESORTMERGEBUCKETMAPJOIN)) { + HiveConf.ConfVars.HIVE_ENFORCE_SORT_MERGE_BUCKET_MAPJOIN)) { throw new SemanticException(ErrorMsg.SORTMERGE_MAPJOIN_FAILED.getMsg()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java index 5b6570a1bad2..7e7c1489cc1c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveDefaultRelMetadataProvider.java @@ -82,7 +82,7 @@ private RelMetadataProvider init(HiveConf hiveConf, List> filter, + Predicate> filter, Hive db, Set tablesUsedByOriginalPlan, HiveTxnManager txnManager) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptMaterializationValidator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptMaterializationValidator.java index cf419b170c13..4f20609cff94 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptMaterializationValidator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/HiveRelOptMaterializationValidator.java @@ -38,7 +38,8 @@ import org.apache.calcite.rex.RexNode; import org.apache.calcite.util.Util; import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.ql.io.AcidUtils; +import org.apache.hadoop.hive.ql.metadata.MaterializationValidationResult; +import org.apache.hadoop.hive.ql.metadata.RewriteAlgorithm; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAggregate; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveAntiJoin; @@ -52,8 +53,10 @@ 
import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveSortLimit; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveUnion; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; + +import java.util.EnumSet; + +import static org.apache.hadoop.hive.ql.metadata.RewriteAlgorithm.TEXT; /** * Checks the query plan for conditions that would make the plan unsuitable for @@ -62,13 +65,16 @@ * - References to non-deterministic functions. */ public class HiveRelOptMaterializationValidator extends HiveRelShuttleImpl { - static final Logger LOG = LoggerFactory.getLogger(HiveRelOptMaterializationValidator.class); + private static final String UNSUPPORTED_BY_CALCITE_FORMAT = + "Only query text based automatic rewriting is available for materialized view. " + + "Statement has unsupported %s: %s."; protected String resultCacheInvalidReason; - protected String automaticRewritingInvalidReason; + protected MaterializationValidationResult materializationValidationResult; public void validate(RelNode relNode) { try { + materializationValidationResult = new MaterializationValidationResult(RewriteAlgorithm.ALL, ""); relNode.accept(this); } catch (Util.FoundOne e) { // Can ignore - the check failed. @@ -113,7 +119,7 @@ public RelNode visit(HiveFilter filter) { @Override public RelNode visit(HiveJoin join) { if (join.getJoinType() != JoinRelType.INNER) { - setAutomaticRewritingInvalidReason(join.getJoinType() + " join type is not supported by rewriting algorithm."); + unsupportedByCalciteRewrite("join type", join.getJoinType().toString()); } checkExpr(join.getCondition()); return super.visit(join); @@ -152,7 +158,7 @@ public RelNode visit(RelNode node) { @Override public RelNode visit(TableFunctionScan scan) { - setAutomaticRewritingInvalidReason(scan); + unsupportedByCalciteRewrite("expression", "window function"); checkExpr(scan.getCall()); return super.visit(scan); } @@ -231,13 +237,13 @@ public RelNode visit(LogicalExchange exchange) { // Note: Not currently part of the HiveRelNode interface private RelNode visit(HiveUnion union) { - setAutomaticRewritingInvalidReason("Statement has unsupported operator: union."); + unsupportedByCalciteRewrite("operator", "union"); return visitChildren(union); } @Override public RelNode visit(HiveSortLimit sort) { - setAutomaticRewritingInvalidReason("Statement has unsupported clause: order by."); + unsupportedByCalciteRewrite("clause","order by"); checkExpr(sort.getFetchExpr()); checkExpr(sort.getOffsetExpr()); return visitChildren(sort); @@ -245,20 +251,20 @@ public RelNode visit(HiveSortLimit sort) { // Note: Not currently part of the HiveRelNode interface private RelNode visit(HiveSortExchange sort) { - setAutomaticRewritingInvalidReason("Statement has unsupported clause: sort by."); + unsupportedByCalciteRewrite("clause", "sort by"); return visitChildren(sort); } // Note: Not currently part of the HiveRelNode interface private RelNode visit(HiveSemiJoin semiJoin) { - setAutomaticRewritingInvalidReason("Statement has unsupported join type: semi join."); + unsupportedByCalciteRewrite("join type", "semi join"); checkExpr(semiJoin.getCondition()); checkExpr(semiJoin.getJoinFilter()); return visitChildren(semiJoin); } private RelNode visit(HiveAntiJoin antiJoin) { - setAutomaticRewritingInvalidReason("Statement has unsupported join type: anti join."); + unsupportedByCalciteRewrite("join type", "anti join"); checkExpr(antiJoin.getCondition()); 
checkExpr(antiJoin.getJoinFilter()); return visitChildren(antiJoin); @@ -266,26 +272,30 @@ private RelNode visit(HiveAntiJoin antiJoin) { // Note: Not currently part of the HiveRelNode interface private RelNode visit(HiveExcept except) { - setAutomaticRewritingInvalidReason("Statement has unsupported operator: except."); + unsupportedByCalciteRewrite("operator", "except"); return visitChildren(except); } // Note: Not currently part of the HiveRelNode interface private RelNode visit(HiveIntersect intersect) { - setAutomaticRewritingInvalidReason("Statement has unsupported operator: intersect."); + unsupportedByCalciteRewrite("operator", "intersect"); return visitChildren(intersect); } private void fail(String reason) { setResultCacheInvalidReason(reason); - setAutomaticRewritingInvalidReason(reason); + this.materializationValidationResult = new MaterializationValidationResult( + EnumSet.noneOf(RewriteAlgorithm.class), "Cannot enable automatic rewriting for materialized view. " + reason); throw Util.FoundOne.NULL; } private RelNode fail(RelNode node) { setResultCacheInvalidReason("Unsupported RelNode type " + node.getRelTypeName() + " encountered in the query plan"); - setAutomaticRewritingInvalidReason(node); + this.materializationValidationResult = + new MaterializationValidationResult(EnumSet.noneOf(RewriteAlgorithm.class), + String.format("Cannot enable automatic rewriting for materialized view. " + + "Unsupported RelNode type %s encountered in the query plan", node.getRelTypeName())); throw Util.FoundOne.NULL; } @@ -308,24 +318,19 @@ public boolean isValidForQueryCaching() { return resultCacheInvalidReason == null; } - public String getAutomaticRewritingInvalidReason() { - return automaticRewritingInvalidReason; - } - - public void setAutomaticRewritingInvalidReason(String automaticRewritingInvalidReason) { - if (isValidForAutomaticRewriting()) { - this.automaticRewritingInvalidReason = automaticRewritingInvalidReason; - } + public MaterializationValidationResult getAutomaticRewritingValidationResult() { + return materializationValidationResult; } - public void setAutomaticRewritingInvalidReason(RelNode node) { + public void unsupportedByCalciteRewrite(String sqlPartType, String sqlPart) { if (isValidForAutomaticRewriting()) { - this.automaticRewritingInvalidReason = "Unsupported RelNode type " + node.getRelTypeName() + - " encountered in the query plan"; + String errorMessage = String.format(UNSUPPORTED_BY_CALCITE_FORMAT, sqlPartType, sqlPart); + this.materializationValidationResult = + new MaterializationValidationResult(EnumSet.of(TEXT), errorMessage); } } public boolean isValidForAutomaticRewriting() { - return automaticRewritingInvalidReason == null; + return RewriteAlgorithm.ALL.equals(materializationValidationResult.getSupportedRewriteAlgorithms()); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateAddSqlOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateAddSqlOperator.java index af9b12ee6f71..ef865e4d22c2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateAddSqlOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateAddSqlOperator.java @@ -18,13 +18,16 @@ package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators; -import org.apache.calcite.sql.fun.SqlAbstractTimeFunction; -import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.calcite.sql.SqlFunction; +import 
org.apache.calcite.sql.SqlFunctionCategory; +import org.apache.calcite.sql.SqlKind; +import org.apache.calcite.sql.type.ReturnTypes; -public class HiveDateAddSqlOperator extends SqlAbstractTimeFunction { - public static final HiveDateAddSqlOperator INSTANCE = new HiveDateAddSqlOperator(); +public final class HiveDateAddSqlOperator { + public static final SqlFunction INSTANCE = + new SqlFunction("DATE_ADD", SqlKind.OTHER_FUNCTION, ReturnTypes.DATE_NULLABLE, null, null, + SqlFunctionCategory.TIMEDATE); - protected HiveDateAddSqlOperator() { - super("DATE_ADD", SqlTypeName.DATE); + private HiveDateAddSqlOperator() { } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateSubSqlOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateSubSqlOperator.java index 4f737126f02a..d1c002111895 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateSubSqlOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveDateSubSqlOperator.java @@ -18,13 +18,16 @@ package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators; -import org.apache.calcite.sql.fun.SqlAbstractTimeFunction; -import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.calcite.sql.SqlFunction; +import org.apache.calcite.sql.SqlFunctionCategory; +import org.apache.calcite.sql.SqlKind; +import org.apache.calcite.sql.type.ReturnTypes; -public class HiveDateSubSqlOperator extends SqlAbstractTimeFunction { - public static final HiveDateSubSqlOperator INSTANCE = new HiveDateSubSqlOperator(); +public final class HiveDateSubSqlOperator { + public static final SqlFunction INSTANCE = + new SqlFunction("DATE_SUB", SqlKind.OTHER_FUNCTION, ReturnTypes.DATE_NULLABLE, null, null, + SqlFunctionCategory.TIMEDATE); - protected HiveDateSubSqlOperator() { - super("DATE_SUB", SqlTypeName.DATE); + private HiveDateSubSqlOperator() { } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFromUnixTimeSqlOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFromUnixTimeSqlOperator.java index 22d91b5a537a..54c68ec89be6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFromUnixTimeSqlOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveFromUnixTimeSqlOperator.java @@ -18,15 +18,32 @@ package org.apache.hadoop.hive.ql.optimizer.calcite.reloperators; -import org.apache.calcite.sql.fun.SqlAbstractTimeFunction; +import org.apache.calcite.sql.SqlFunction; +import org.apache.calcite.sql.SqlFunctionCategory; +import org.apache.calcite.sql.SqlKind; +import org.apache.calcite.sql.type.OperandTypes; +import org.apache.calcite.sql.type.ReturnTypes; +import org.apache.calcite.sql.type.SqlTypeFamily; import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.calcite.sql.type.SqlTypeTransforms; + +import java.util.Arrays; /** - * Calcite SQL operator mapping to FROM_UNIXTIME Hive UDF + * Calcite SQL operator mapping to FROM_UNIXTIME Hive UDF. + *

+ * The return type of the function is declared as {@code VARCHAR(100)} since it is highly unlikely that a user will
+ * request a timestamp format that requires more than 100 characters.
+ *

*/ -public class HiveFromUnixTimeSqlOperator extends SqlAbstractTimeFunction { - public static final HiveFromUnixTimeSqlOperator INSTANCE = new HiveFromUnixTimeSqlOperator(); - protected HiveFromUnixTimeSqlOperator() { - super("FROM_UNIXTIME", SqlTypeName.TIMESTAMP); +public final class HiveFromUnixTimeSqlOperator { + public static final SqlFunction INSTANCE = new SqlFunction("FROM_UNIXTIME", + SqlKind.OTHER_FUNCTION, + ReturnTypes.explicit(SqlTypeName.VARCHAR, 100).andThen(SqlTypeTransforms.TO_NULLABLE), + null, + OperandTypes.family(Arrays.asList(SqlTypeFamily.INTEGER, SqlTypeFamily.STRING), number -> number == 1), + SqlFunctionCategory.STRING); + + private HiveFromUnixTimeSqlOperator() { } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveToUnixTimestampSqlOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveToUnixTimestampSqlOperator.java index 372aa3002522..18751ce73929 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveToUnixTimestampSqlOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/reloperators/HiveToUnixTimestampSqlOperator.java @@ -27,15 +27,13 @@ /** * Sql UNIX_TIMESTAMP calcite operator. */ -public class HiveToUnixTimestampSqlOperator { +public final class HiveToUnixTimestampSqlOperator { public static final SqlFunction INSTANCE = new SqlFunction("UNIX_TIMESTAMP", SqlKind.OTHER_FUNCTION, ReturnTypes.BIGINT, null, OperandTypes.or(OperandTypes.NILADIC, OperandTypes.or(OperandTypes.STRING, OperandTypes.TIMESTAMP, OperandTypes.DATE), - OperandTypes.STRING_STRING), SqlFunctionCategory.NUMERIC) { - @Override - public boolean isDynamicFunction() { - return true; - } - }; + OperandTypes.STRING_STRING), SqlFunctionCategory.NUMERIC); + + private HiveToUnixTimestampSqlOperator() { + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java index fd54c0fd689e..f10a19e41ada 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/HiveRelDecorrelator.java @@ -3014,18 +3014,10 @@ public boolean traverse(RelNode root) { } /** Builds a {@link org.apache.calcite.sql2rel.RelDecorrelator.CorelMap}. 
*/ private static class CorelMapBuilder extends HiveRelShuttleImpl { - private final SortedMap mapCorToCorRel = - new TreeMap<>(); - - private final SortedSetMultimap mapRefRelToCorRef = - Multimaps.newSortedSetMultimap( - new HashMap>(), - new Supplier>() { - @Override - public TreeSet get() { - return Sets.newTreeSet(); - } - }); + private final SortedMap mapCorToCorRel = new TreeMap<>(); + + private final Multimap mapRefRelToCorRef = + Multimaps.newListMultimap(new HashMap<>(), Lists::newArrayList); private final Map mapFieldAccessToCorVar = new HashMap<>(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateInsertDeleteIncrementalRewritingRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateInsertDeleteIncrementalRewritingRule.java index 9005f643e2ae..6a826a15b1ff 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateInsertDeleteIncrementalRewritingRule.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveAggregateInsertDeleteIncrementalRewritingRule.java @@ -30,6 +30,7 @@ import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.fun.SqlStdOperatorTable; import org.apache.calcite.tools.RelBuilder; +import org.apache.hadoop.hive.ql.ddl.view.materialized.alter.rebuild.AlterMaterializedViewRebuildAnalyzer; import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories; import org.apache.hadoop.hive.ql.optimizer.calcite.rules.HiveHepExtractRelNodeRule; @@ -38,7 +39,8 @@ * This rule will perform a rewriting to prepare the plan for incremental * view maintenance in case there exist aggregation operator, so we can * avoid the INSERT OVERWRITE and use a MERGE statement instead. - * + *
+ *
  * In particular, the INSERT OVERWRITE maintenance will look like this
  * (in SQL):
  * INSERT OVERWRITE mv
@@ -52,8 +54,9 @@
  *   WHERE TAB_A.ROW_ID > 5
  *   GROUP BY a, b) inner_subq
  * GROUP BY a, b;
- *
+ * 
 * We need to transform that into:
+ *
  * MERGE INTO mv
  * USING (
  *   SELECT a, b, SUM(x) AS s, COUNT(*) AS c --NEW DATA
@@ -67,8 +70,9 @@
  * WHEN MATCHED AND countStar = 0 THEN DELETE
  * WHEN NOT MATCHED
  *   THEN INSERT VALUES (source.a, source.b, s, c);
- *
+ * 
 * To be precise, we need to convert it into a MERGE rewritten as:
+ *
  * FROM (select *, true flag from mv) mv right outer join _source_ source
  * ON (mv.a <=> source.a AND mv.b <=> source.b)
  * INSERT INTO TABLE mv                                       <- (insert new rows into the view)
@@ -90,8 +94,9 @@
  *   SELECT mv.ROW__ID
  *   WHERE mv.flag AND countStar = 0
  *   SORT BY mv.ROW__ID;
+ * 
* - * @see org.apache.hadoop.hive.ql.parse.CalcitePlanner + * @see AlterMaterializedViewRebuildAnalyzer */ public class HiveAggregateInsertDeleteIncrementalRewritingRule extends HiveAggregateIncrementalRewritingRuleBase< HiveAggregateInsertDeleteIncrementalRewritingRule.IncrementalComputePlanWithDeletedRows> { @@ -116,7 +121,10 @@ protected IncrementalComputePlanWithDeletedRows createJoinRightInput(RelOptRuleC aggInput = HiveHepExtractRelNodeRule.execute(aggInput); aggInput = new HiveRowIsDeletedPropagator(relBuilder).propagate(aggInput); - int rowIsDeletedIdx = aggInput.getRowType().getFieldCount() - 1; + // The row schema has two additional columns after propagation: + // rowIsDeleted is the last but one + // col0 ... coln, _any_deleted, _any_inserted + int rowIsDeletedIdx = aggInput.getRowType().getFieldCount() - 2; RexNode rowIsDeletedNode = rexBuilder.makeInputRef( aggInput.getRowType().getFieldList().get(rowIsDeletedIdx).getType(), rowIsDeletedIdx); @@ -130,7 +138,7 @@ protected IncrementalComputePlanWithDeletedRows createJoinRightInput(RelOptRuleC List newAggregateCalls = new ArrayList<>(aggregate.getAggCallList().size()); for (int i = 0; i < aggregate.getAggCallList().size(); ++i) { AggregateCall aggregateCall = aggregate.getAggCallList().get(i); - if (aggregateCall.getAggregation().getKind() == SqlKind.COUNT && aggregateCall.getArgList().size() == 0) { + if (aggregateCall.getAggregation().getKind() == SqlKind.COUNT && aggregateCall.getArgList().isEmpty()) { countIdx = i + aggregate.getGroupCount(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveJoinInsertDeleteIncrementalRewritingRule.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveJoinInsertDeleteIncrementalRewritingRule.java deleted file mode 100644 index 57274e55d87e..000000000000 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveJoinInsertDeleteIncrementalRewritingRule.java +++ /dev/null @@ -1,196 +0,0 @@ -package org.apache.hadoop.hive.ql.optimizer.calcite.rules.views;/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -import org.apache.calcite.plan.RelOptRule; -import org.apache.calcite.plan.RelOptRuleCall; -import org.apache.calcite.rel.RelNode; -import org.apache.calcite.rel.core.JoinRelType; -import org.apache.calcite.rel.core.Union; -import org.apache.calcite.rel.type.RelDataType; -import org.apache.calcite.rel.type.RelDataTypeField; -import org.apache.calcite.rex.RexBuilder; -import org.apache.calcite.rex.RexNode; -import org.apache.calcite.rex.RexUtil; -import org.apache.calcite.sql.fun.SqlStdOperatorTable; -import org.apache.calcite.tools.RelBuilder; -import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories; -import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin; -import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject; -import org.apache.hadoop.hive.ql.parse.ASTNode; -import org.apache.hadoop.hive.ql.parse.CalcitePlanner; - -import java.util.ArrayList; -import java.util.List; - -/** - * This rule will perform a rewriting to prepare the plan for incremental - * view maintenance in case there is no aggregation operator but some of the - * source tables has delete operations, so we can avoid the INSERT OVERWRITE and use a - * MULTI INSERT statement instead: one insert branch for inserted rows - * and another for inserting deleted rows to delete delta. - * Since CBO plan does not contain the INSERT branches we focus on the SELECT part of the plan in this rule. - * See also {@link CalcitePlanner} - * - * FROM (select mv.ROW__ID, mv.a, mv.b from mv) mv - * RIGHT OUTER JOIN (SELECT _source_.ROW__IS_DELETED,_source_.a, _source_.b FROM _source_) source - * ON (mv.a <=> source.a AND mv.b <=> source.b) - * INSERT INTO TABLE mv_delete_delta - * SELECT mv.ROW__ID - * WHERE source.ROW__IS__DELETED - * INSERT INTO TABLE mv - * SELECT source.a, source.b - * WHERE NOT source.ROW__IS__DELETED - * SORT BY mv.ROW__ID; - */ -public class HiveJoinInsertDeleteIncrementalRewritingRule extends RelOptRule { - - public static final HiveJoinInsertDeleteIncrementalRewritingRule INSTANCE = - new HiveJoinInsertDeleteIncrementalRewritingRule(); - - private HiveJoinInsertDeleteIncrementalRewritingRule() { - super(operand(Union.class, any()), - HiveRelFactories.HIVE_BUILDER, - "HiveJoinInsertDeleteIncrementalRewritingRule"); - } - - @Override - public void onMatch(RelOptRuleCall call) { - final Union union = call.rel(0); - RexBuilder rexBuilder = union.getCluster().getRexBuilder(); - // First branch is query, second branch is MV - // 1) First branch is query, second branch is MV - final RelNode joinLeftInput = union.getInput(1); - final RelNode joinRightInput = union.getInput(0); - - // 2) Build conditions for join and start adding - // expressions for project operator - List projExprs = new ArrayList<>(); - List joinConjs = new ArrayList<>(); - for (int leftPos = 0; leftPos < joinLeftInput.getRowType().getFieldCount() - 1; leftPos++) { - RexNode leftRef = rexBuilder.makeInputRef( - joinLeftInput.getRowType().getFieldList().get(leftPos).getType(), leftPos); - RexNode rightRef = rexBuilder.makeInputRef( - joinRightInput.getRowType().getFieldList().get(leftPos).getType(), - leftPos + joinLeftInput.getRowType().getFieldCount()); - - projExprs.add(rightRef); - - joinConjs.add(rexBuilder.makeCall(SqlStdOperatorTable.IS_NOT_DISTINCT_FROM, leftRef, rightRef)); - } - - RexNode joinCond = RexUtil.composeConjunction(rexBuilder, joinConjs); - - int rowIsDeletedIdx = joinRightInput.getRowType().getFieldCount() - 1; - RexNode rowIsDeleted = rexBuilder.makeInputRef( - 
joinRightInput.getRowType().getFieldList().get(rowIsDeletedIdx).getType(), - joinLeftInput.getRowType().getFieldCount() + rowIsDeletedIdx); - projExprs.add(rowIsDeleted); - - // 3) Build plan - RelNode newNode = call.builder() - .push(union.getInput(1)) - .push(union.getInput(0)) - .join(JoinRelType.RIGHT, joinCond) - .project(projExprs) - .build(); - call.transformTo(newNode); - } - - public static class FilterPropagator extends HiveRowIsDeletedPropagator { - - private boolean foundTopRightJoin; - - public FilterPropagator(RelBuilder relBuilder) { - super(relBuilder); - } - - @Override - public RelNode propagate(RelNode relNode) { - foundTopRightJoin = false; - return super.propagate(relNode); - } - - @Override - public RelNode visit(HiveProject project) { - if (!foundTopRightJoin) { - return visitChild(project, 0, project.getInput()); - } - - // continue traversal and propagate rowIsDeleted column - return super.visit(project); - } - - @Override - public RelNode visit(HiveJoin join) { - if (!foundTopRightJoin) { - if (join.getJoinType() != JoinRelType.RIGHT) { - // continue search for top Right Join node - return visitChildren(join); - } - - foundTopRightJoin = true; - return createFilter(join); - } - - // continue traversal and propagate rowIsDeleted column - return super.visit(join); - } - - private RelNode createFilter(HiveJoin join) { - // This should be a Scan on the MV - RelNode leftInput = join.getLeft(); - - // This branch is querying the rows should be inserted/deleted into the view since the last rebuild. - RelNode rightInput = join.getRight(); - - RelNode tmpJoin = visitChild(join, 1, rightInput); - RelNode newRightInput = tmpJoin.getInput(1); - - // Create input ref to rowIsDeleteColumn. It is used in filter condition later. - RelDataType newRowType = newRightInput.getRowType(); - int rowIsDeletedIdx = newRowType.getFieldCount() - 1; - RexBuilder rexBuilder = relBuilder.getRexBuilder(); - RexNode rowIsDeleted = rexBuilder.makeInputRef( - newRowType.getFieldList().get(rowIsDeletedIdx).getType(), - leftInput.getRowType().getFieldCount() + rowIsDeletedIdx); - - List projects = new ArrayList<>(newRowType.getFieldCount()); - List projectNames = new ArrayList<>(newRowType.getFieldCount()); - for (int i = 0; i < leftInput.getRowType().getFieldCount(); ++i) { - RelDataTypeField relDataTypeField = leftInput.getRowType().getFieldList().get(i); - projects.add(rexBuilder.makeInputRef(relDataTypeField.getType(), i)); - projectNames.add(relDataTypeField.getName()); - } - for (int i = 0; i < newRowType.getFieldCount() - 1; ++i) { - RelDataTypeField relDataTypeField = newRowType.getFieldList().get(i); - projects.add(rexBuilder.makeInputRef(relDataTypeField.getType(), leftInput.getRowType().getFieldCount() + i)); - projectNames.add(relDataTypeField.getName()); - } - - // Create new Top Right Join and a Filter. The filter condition is used in CalcitePlanner.fixUpASTJoinIncrementalRebuild(). 
- return relBuilder - .push(leftInput) - .push(newRightInput) - .join(join.getJoinType(), join.getCondition()) - .filter(rexBuilder.makeCall(SqlStdOperatorTable.OR, - rowIsDeleted, rexBuilder.makeCall(SqlStdOperatorTable.NOT, rowIsDeleted))) - .project(projects, projectNames) - .build(); - } - } -} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveRowIsDeletedPropagator.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveRowIsDeletedPropagator.java index 9537acd16a48..afb1ef36fb3f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveRowIsDeletedPropagator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/HiveRowIsDeletedPropagator.java @@ -17,61 +17,105 @@ */ package org.apache.hadoop.hive.ql.optimizer.calcite.rules.views; +import org.apache.calcite.linq4j.Ord; import org.apache.calcite.rel.RelNode; -import org.apache.calcite.rel.core.JoinRelType; import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeField; import org.apache.calcite.rex.RexBuilder; +import org.apache.calcite.rex.RexCall; import org.apache.calcite.rex.RexInputRef; import org.apache.calcite.rex.RexNode; -import org.apache.calcite.rex.RexShuttle; +import org.apache.calcite.rex.RexUtil; +import org.apache.calcite.rex.RexVisitor; +import org.apache.calcite.rex.RexVisitorImpl; +import org.apache.calcite.sql.SqlKind; import org.apache.calcite.sql.fun.SqlStdOperatorTable; +import org.apache.calcite.sql.type.SqlTypeName; import org.apache.calcite.tools.RelBuilder; +import org.apache.calcite.util.ReflectUtil; +import org.apache.calcite.util.ReflectiveVisitor; +import org.apache.hadoop.hive.ql.ddl.view.materialized.alter.rebuild.AlterMaterializedViewRebuildAnalyzer; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; -import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelShuttle; -import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelShuttleImpl; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveJoin; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveProject; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan; import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; +import java.util.Map; +import java.util.Set; + +import static java.util.Arrays.asList; /** - * {@link HiveRelShuttle} to propagate rowIsDeleted column to all HiveRelNodes' rowType in the plan. - * General rule: we expect that the rowIsDeleted column is the last column in the input rowType of the current + * {@link ReflectiveVisitor} to propagate row is deleted or inserted columns to all HiveRelNodes' rowType in the plan. + * General rule: we expect that these columns are the last columns in the input rowType of the current * {@link org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode}. + * + * This class is part of incremental rebuild of materialized view plan generation. + *
+ * @see AlterMaterializedViewRebuildAnalyzer + * @see HiveAggregateInsertDeleteIncrementalRewritingRule */ -public class HiveRowIsDeletedPropagator extends HiveRelShuttleImpl { +public class HiveRowIsDeletedPropagator implements ReflectiveVisitor { - protected final RelBuilder relBuilder; + private static final String ANY_DELETED_COLUMN_NAME = "_any_deleted"; + private static final String ANY_INSERTED_COLUMN_NAME = "_any_inserted"; + private static final String DELETED_COLUMN_NAME = "_deleted"; + private static final String INSERTED_COLUMN_NAME = "_inserted"; + + private final RelBuilder relBuilder; + private final ReflectUtil.MethodDispatcher dispatcher; public HiveRowIsDeletedPropagator(RelBuilder relBuilder) { this.relBuilder = relBuilder; + this.dispatcher = ReflectUtil.createMethodDispatcher( + RelNode.class, this, "visit", RelNode.class, Context.class); } public RelNode propagate(RelNode relNode) { - return relNode.accept(this); - } - - /** - * Create a Projection on top of TS that contains all columns from TS. - * Let rowIsDeleted the last column in the new Project. - * Enable fetching Deleted rows in TS. - * @param scan - TS to transform - * @return - new TS and a optionally a Project on top of it. - */ - @Override - public RelNode visit(HiveTableScan scan) { - RelDataType tableRowType = scan.getTable().getRowType(); - RelDataTypeField column = tableRowType.getField( - VirtualColumn.ROWISDELETED.getName(), false, false); - if (column == null) { - // This should not happen since Virtual columns are propagated for all native table scans in - // CalcitePlanner.genTableLogicalPlan() - throw new ColumnPropagationException("TableScan " + scan + " row schema does not contain " + - VirtualColumn.ROWISDELETED.getName() + " virtual column"); + return dispatcher.invoke(relNode, new Context()); + } + + private RelNode visitChild(RelNode parent, int i, RelNode child, Context context) { + RelNode newRel = dispatcher.invoke(child, context); + final List newInputs = new ArrayList<>(parent.getInputs()); + newInputs.set(i, newRel); + return parent.copy(parent.getTraitSet(), newInputs); + } + + private RelNode visitChildren(RelNode rel, Context context) { + for (Ord input : Ord.zip(rel.getInputs())) { + rel = visitChild(rel, input.i, input.e, context); } + return rel; + } + + public static final class Context { + private final Map rowIdPredicates = new HashMap<>(); + } + + public RelNode visit(RelNode relNode, Context context) { + return visitChildren(relNode, context); + } + + // Add a project on top of the TS. + // Project two boolean columns: one for indicating the row is deleted another + // for newly inserted. 
+ // A row is considered to be + // - deleted when the ROW_IS_DELETED virtual column is true and the writeId of the record is higher than the + // saved in materialized view snapshot metadata + // - newly inserted when the ROW_IS_DELETED virtual column is false and the writeId of the record is higher than the + // saved in materialized view snapshot metadata + public RelNode visit(HiveTableScan scan, Context context) { + RelDataType tableRowType = scan.getTable().getRowType(); + RelDataTypeField rowIdField = getVirtualColumnField(tableRowType, VirtualColumn.ROWID, scan); + RexNode rowIdPredicate = context.rowIdPredicates.get(rowIdField.getIndex()); + + RelDataTypeField rowIsDeletedField = getVirtualColumnField(tableRowType, VirtualColumn.ROWISDELETED, scan); RexBuilder rexBuilder = relBuilder.getRexBuilder(); @@ -79,10 +123,27 @@ public RelNode visit(HiveTableScan scan) { List projectNames = new ArrayList<>(tableRowType.getFieldCount()); populateProjects(rexBuilder, tableRowType, projects, projectNames); // Propagated column is already in the TS move it to the end - RexNode propagatedColumn = projects.remove(column.getIndex()); - projects.add(propagatedColumn); - String propagatedColumnName = projectNames.remove(column.getIndex()); - projectNames.add(propagatedColumnName); + RexNode rowIsDeleted = projects.remove(rowIsDeletedField.getIndex()); + projects.add(rowIsDeleted); + // predicates on rowId introduced by HiveAugmentMaterializationRule into the original MV definition query plan + // on top of each TS operators. + // Later that plan is transformed to a Union rewrite plan where all rowId predicates are pulled up on top of + // the top Join operator. + if (rowIdPredicate == null) { + // If a table have not changed then no predicate is introduced for the TS. All rows in the table should remain. + projects.add(rexBuilder.makeLiteral(false)); + projects.add(rexBuilder.makeLiteral(false)); + } else { + // A row is deleted if ROW_IS_DELETED is true and rowId > + projects.add(rexBuilder.makeCall(SqlStdOperatorTable.AND, rowIsDeleted, rowIdPredicate)); + // A row is newly inserted if ROW_IS_DELETED is false and rowId > + projects.add(rexBuilder.makeCall(SqlStdOperatorTable.AND, + rexBuilder.makeCall(SqlStdOperatorTable.NOT, rowIsDeleted), rowIdPredicate)); + } + String rowIsDeletedName = projectNames.remove(rowIsDeletedField.getIndex()); + projectNames.add(rowIsDeletedName); + projectNames.add(DELETED_COLUMN_NAME); + projectNames.add(INSERTED_COLUMN_NAME); // Note: as a nature of Calcite if row schema of TS and the new Project would be exactly the same no // Project is created. @@ -92,111 +153,146 @@ public RelNode visit(HiveTableScan scan) { .build(); } - /** - * Create a new Project with original projected columns plus add rowIsDeleted as last column referencing - * the last column of the input {@link RelNode}. 
- * @param project - {@link HiveProject to transform} - * @return new Project - */ - @Override - public RelNode visit(HiveProject project) { - RelNode newProject = visitChild(project, 0, project.getInput()); + // Add the new columns(_deleted, _inserted) to the original project + public RelNode visit(HiveProject project, Context context) { + RelNode newProject = visitChild(project, 0, project.getInput(), context); RelNode projectInput = newProject.getInput(0); - int rowIsDeletedIndex = projectInput.getRowType().getFieldCount() - 1; - List newProjects = new ArrayList<>(project.getRowType().getFieldCount() + 1); - newProjects.addAll(project.getProjects()); - RexNode rowIsDeleted = relBuilder.getRexBuilder().makeInputRef( - projectInput.getRowType().getFieldList().get(rowIsDeletedIndex).getType(), rowIsDeletedIndex); - newProjects.add(rowIsDeleted); + List newProjects = new ArrayList<>(project.getProjects().size() + 2); + newProjects.addAll(project.getProjects()); + newProjects.add(createInputRef(projectInput, 2)); + newProjects.add(createInputRef(projectInput, 1)); return relBuilder - .push(projectInput) - .project(newProjects) - .build(); - } - - /** - * Create new Join and a Project on top of it. - * @param join - {@link HiveJoin} to transform - * @return - new Join with a Project on top - */ - @Override - public RelNode visit(HiveJoin join) { - // Propagate rowISDeleted to left input - RelNode tmpJoin = visitChild(join, 0, join.getInput(0)); - RelNode leftInput = tmpJoin.getInput(0); - RelDataType leftRowType = tmpJoin.getInput(0).getRowType(); - int leftRowIsDeletedIndex = leftRowType.getFieldCount() - 1; - // Propagate rowISDeleted to right input - tmpJoin = visitChild(join, 1, join.getInput(1)); - RelNode rightInput = tmpJoin.getInput(1); - RelDataType rightRowType = rightInput.getRowType(); - int rightRowIsDeletedIndex = rightRowType.getFieldCount() - 1; - - // Create input ref to rowIsDeleted columns in left and right inputs - RexBuilder rexBuilder = relBuilder.getRexBuilder(); - RexNode leftRowIsDeleted = rexBuilder.makeInputRef( - leftRowType.getFieldList().get(leftRowIsDeletedIndex).getType(), leftRowIsDeletedIndex); - RexNode rightRowIsDeleted = rexBuilder.makeInputRef( - rightRowType.getFieldList().get(rightRowIsDeletedIndex).getType(), - leftRowType.getFieldCount() + rightRowIsDeletedIndex); - - RexNode newJoinCondition; - int newLeftFieldCount; - if (join.getInput(0).getRowType().getField(VirtualColumn.ROWISDELETED.getName(), false, false) == null) { - // Shift column references refers columns coming from right input by one in join condition since the new left input - // has a new column - newJoinCondition = new InputRefShifter(leftRowType.getFieldCount() - 1, relBuilder) - .apply(join.getCondition()); - - newLeftFieldCount = leftRowType.getFieldCount() - 1; - } else { - newJoinCondition = join.getCondition(); - newLeftFieldCount = leftRowType.getFieldCount(); + .push(projectInput) + .project(newProjects) + .build(); + } + + // Union rewrite algorithm pulls up all the predicates on rowId on top of top Join operator: + // Example: + // HiveUnion(all=[true]) + // ... 
+ // HiveFilter(condition=[OR(<(1, $14.writeid), <(1, $6.writeid))]) + // HiveJoin(condition=[=($0, $8)], joinType=[inner], algorithm=[none], cost=[not available]) + // Check the filter condition and collect operands of OR expressions referencing only one column + public RelNode visit(HiveFilter filter, Context context) { + RexNode condition = filter.getCondition(); + + // The condition might be a single predicate on the rowId (if only one table changed) + RexInputRef rexInputRef = findPossibleRowIdRef(filter.getCondition()); + if (rexInputRef != null) { + context.rowIdPredicates.put(rexInputRef.getIndex(), filter.getCondition()); + return visitChild(filter, 0, filter.getInput(0), context); } - // Collect projected columns: all columns from both inputs - List projects = new ArrayList<>(newLeftFieldCount + rightRowType.getFieldCount() + 1); - List projectNames = new ArrayList<>(newLeftFieldCount + rightRowType.getFieldCount() + 1); - populateProjects(rexBuilder, leftRowType, 0, newLeftFieldCount, projects, projectNames); - populateProjects(rexBuilder, rightRowType, leftRowType.getFieldCount(), rightRowType.getFieldCount(), projects, projectNames); + if (!condition.isA(SqlKind.OR)) { + return visitChild(filter, 0, filter.getInput(0), context); + } - // Add rowIsDeleted column to project - projects.add(rexBuilder.makeCall(SqlStdOperatorTable.OR, leftRowIsDeleted, rightRowIsDeleted)); - projectNames.add(VirtualColumn.ROWISDELETED.getName()); + for (RexNode operand : ((RexCall)condition).operands) { + RexInputRef inputRef = findPossibleRowIdRef(operand); + if (inputRef != null) { + context.rowIdPredicates.put(inputRef.getIndex(), operand); + } + } - return relBuilder - .push(leftInput) - .push(rightInput) - .join(join.getJoinType(), newJoinCondition) - .project(projects) - .build(); + return visitChild(filter, 0, filter.getInput(0), context); } - private static class InputRefShifter extends RexShuttle { - private final int startIndex; - private final RelBuilder relBuilder; - - private InputRefShifter(int startIndex, RelBuilder relBuilder) { - this.startIndex = startIndex; - this.relBuilder = relBuilder; + private RexInputRef findPossibleRowIdRef(RexNode operand) { + Set inputRefs = findRexInputRefs(operand); + if (inputRefs.size() != 1) { + return null; } - /** - * Shift input reference index by one if the referenced column index is higher or equals with the startIndex. - * @param inputRef - {@link RexInputRef} to transform - * @return new {@link RexInputRef} if the referenced column index is higher or equals with the startIndex, - * original otherwise - */ - @Override - public RexNode visitInputRef(RexInputRef inputRef) { - if (inputRef.getIndex() >= startIndex) { - RexBuilder rexBuilder = relBuilder.getRexBuilder(); - return rexBuilder.makeInputRef(inputRef.getType(), inputRef.getIndex() + 1); + // This is a candidate for predicate on rowId + return inputRefs.iterator().next(); + } + + // Propagate new column to each side of the join. + // Create a project to combine the propagated expressions. + // Create a filter to remove rows which are joined from a deleted and a newly inserted row. + public RelNode visit(HiveJoin join, Context context) { + // Propagate columns to left input + RelNode tmpJoin = visitChild(join, 0, join.getInput(0), context); + RelNode newLeftInput = tmpJoin.getInput(0); + RelDataType newLeftRowType = newLeftInput.getRowType(); + // Propagate columns to right input. 
+ // All column references should be shifted in candidate predicates to the left + Context rightContext = new Context(); + int originalLeftFieldCount = join.getInput(0).getRowType().getFieldCount(); + for (Map.Entry entry : context.rowIdPredicates.entrySet()) { + if (entry.getKey() > originalLeftFieldCount) { + rightContext.rowIdPredicates.put(entry.getKey() - originalLeftFieldCount, + new InputRefShifter(originalLeftFieldCount, -originalLeftFieldCount, relBuilder).apply(entry.getValue())); } - return inputRef; } + tmpJoin = visitChild(join, 1, join.getInput(1), rightContext); + RelNode newRightInput = tmpJoin.getInput(1); + RelDataType newRightRowType = newRightInput.getRowType(); + + // Create input refs to propagated columns in left and right inputs + int rightAnyDeletedIndex = newRightRowType.getFieldCount() - 2; + int rightAnyInsertedIndex = newRightRowType.getFieldCount() - 1; + RexBuilder rexBuilder = relBuilder.getRexBuilder(); + RexNode leftDeleted = createInputRef(newLeftInput, 2); + RexNode leftInserted = createInputRef(newLeftInput, 1); + RexNode rightDeleted = rexBuilder.makeInputRef( + newRightRowType.getFieldList().get(rightAnyDeletedIndex).getType(), + newLeftRowType.getFieldCount() + rightAnyDeletedIndex); + RexNode rightInserted = rexBuilder.makeInputRef( + newRightRowType.getFieldList().get(rightAnyInsertedIndex).getType(), + newLeftRowType.getFieldCount() + rightAnyInsertedIndex); + + // Shift column references refers columns coming from right input in join condition since the new left input + // has a new columns + int newLeftFieldCount = newLeftRowType.getFieldCount() - 2; + RexNode newJoinCondition = new InputRefShifter(newLeftFieldCount, 2, relBuilder).apply(join.getCondition()); + + // Collect projected columns: all columns from both inputs + List projects = new ArrayList<>(newLeftFieldCount + newRightRowType.getFieldCount() + 1); + List projectNames = new ArrayList<>(newLeftFieldCount + newRightRowType.getFieldCount() + 1); + populateProjects(rexBuilder, newLeftRowType, 0, newLeftFieldCount, projects, projectNames); + populateProjects(rexBuilder, newRightRowType, newLeftRowType.getFieldCount(), + newRightRowType.getFieldCount() - 2, projects, projectNames); + + // Create derived expressions + projects.add(rexBuilder.makeCall(SqlStdOperatorTable.OR, leftDeleted, rightDeleted)); + projects.add(rexBuilder.makeCall(SqlStdOperatorTable.OR, leftInserted, rightInserted)); + projectNames.add(ANY_DELETED_COLUMN_NAME); + projectNames.add(ANY_INSERTED_COLUMN_NAME); + + // Create input refs to derived expressions in project + RelDataType boolIntType = relBuilder.getTypeFactory().createSqlType(SqlTypeName.BOOLEAN); + RexNode anyDeleted = rexBuilder.makeInputRef(boolIntType, projects.size() - 2); + RexNode anyInserted = rexBuilder.makeInputRef(boolIntType, projects.size() - 1); + + // Create filter condition: NOT( (leftDeleted OR rightDeleted) AND (leftInserted OR rightInserted) ) + // We exploit that a row can not be deleted and inserted at the same time. 
+ RexNode filterCondition = rexBuilder.makeCall(SqlStdOperatorTable.NOT, + RexUtil.composeConjunction(rexBuilder, asList(anyDeleted, anyInserted))); + + return relBuilder + .push(newLeftInput) + .push(newRightInput) + .join(join.getJoinType(), newJoinCondition) + .project(projects, projectNames) + .filter(filterCondition) + .build(); + } + + private RelDataTypeField getVirtualColumnField( + RelDataType tableRowType, VirtualColumn virtualColumn, HiveTableScan scan) { + RelDataTypeField field = tableRowType.getField( + virtualColumn.getName(), false, false); + if (field == null) { + // This should not happen since Virtual columns are propagated for all native table scans in + // CalcitePlanner.genTableLogicalPlan() + throw new ColumnPropagationException("TableScan " + scan + " row schema does not contain " + + virtualColumn.getName() + " virtual column"); + } + return field; } private void populateProjects(RexBuilder rexBuilder, RelDataType inputRowType, @@ -211,4 +307,25 @@ private void populateProjects(RexBuilder rexBuilder, RelDataType inputRowType, i projectNames.add(relDataTypeField.getName()); } } + + private RexNode createInputRef(RelNode relNode, int negativeOffset) { + int index = relNode.getRowType().getFieldCount() - negativeOffset; + return relBuilder.getRexBuilder().makeInputRef( + relNode.getRowType().getFieldList().get(index).getType(), index); + } + + private Set findRexInputRefs(RexNode rexNode) { + Set rexTableInputRefs = new HashSet<>(); + RexVisitor visitor = new RexVisitorImpl(true) { + + @Override + public RexInputRef visitInputRef(RexInputRef inputRef) { + rexTableInputRefs.add(inputRef); + return super.visitInputRef(inputRef); + } + }; + + rexNode.accept(visitor); + return rexTableInputRefs; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/InputRefShifter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/InputRefShifter.java new file mode 100644 index 000000000000..06e6e9e5f62e --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/InputRefShifter.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.calcite.rules.views; + +import org.apache.calcite.rex.RexBuilder; +import org.apache.calcite.rex.RexInputRef; +import org.apache.calcite.rex.RexNode; +import org.apache.calcite.rex.RexShuttle; +import org.apache.calcite.tools.RelBuilder; + +/** + * Shift input reference index by the specified amount (shift) if the referenced column index is higher or equals with + * the startIndex. 
+ */ +public class InputRefShifter extends RexShuttle { + private final int startIndex; + private final int shift; + private final RelBuilder relBuilder; + + InputRefShifter(int startIndex, int shift, RelBuilder relBuilder) { + this.startIndex = startIndex; + this.shift = shift; + this.relBuilder = relBuilder; + } + + @Override + public RexNode visitInputRef(RexInputRef inputRef) { + if (inputRef.getIndex() >= startIndex) { + RexBuilder rexBuilder = relBuilder.getRexBuilder(); + return rexBuilder.makeInputRef(inputRef.getType(), inputRef.getIndex() + shift); + } + return inputRef; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/MaterializedViewRewritingRelVisitor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/MaterializedViewRewritingRelVisitor.java index 49a098282851..d8ac6798221f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/MaterializedViewRewritingRelVisitor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/MaterializedViewRewritingRelVisitor.java @@ -31,7 +31,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * This class is a helper to check whether a materialized view rebuild * can be transformed from INSERT OVERWRITE to INSERT INTO. @@ -50,7 +49,7 @@ public class MaterializedViewRewritingRelVisitor extends RelVisitor { private boolean containsAggregate; - private boolean fullAcidView; + private final boolean fullAcidView; private boolean rewritingAllowed; private int countIndex; @@ -88,6 +87,7 @@ private void check(Union union) { throw new ReturnedValue(false); } // First branch should have the query (with write ID filter conditions) + RelNode queryBranch = union.getInput(0); new RelVisitor() { @Override public void visit(RelNode node, int ordinal, RelNode parent) { @@ -112,7 +112,8 @@ public void visit(RelNode node, int ordinal, RelNode parent) { throw new ReturnedValue(false); } } - }.go(union.getInput(0)); + }.go(queryBranch); + // Second branch should only have the MV new RelVisitor() { @Override @@ -177,5 +178,4 @@ public ReturnedValue(boolean value) { this.value = value; } } - } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java index dec4deddd7d7..2c0fe365dac8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/ASTConverter.java @@ -180,9 +180,7 @@ public static ASTNode emptyPlan(RelDataType dataType) { ASTBuilder select = ASTBuilder.construct(HiveParser.TOK_SELECT, "TOK_SELECT"); for (int i = 0; i < dataType.getFieldCount(); ++i) { RelDataTypeField fieldType = dataType.getFieldList().get(i); - select.add(ASTBuilder.selectExpr( - createNullField(fieldType.getType()), - fieldType.getName())); + select.add(ASTBuilder.selectExpr(createNullField(fieldType.getType()), fieldType.getName())); } ASTNode insert = ASTBuilder. 
@@ -203,53 +201,52 @@ private static ASTNode createNullField(RelDataType fieldType) { return ASTBuilder.construct(HiveParser.TOK_NULL, "TOK_NULL").node(); } + ASTNode astNode = convertType(fieldType); + return ASTBuilder.construct(HiveParser.TOK_FUNCTION, "TOK_FUNCTION") + .add(astNode) + .add(HiveParser.TOK_NULL, "TOK_NULL") + .node(); + } + + static ASTNode convertType(RelDataType fieldType) { + if (fieldType.getSqlTypeName() == SqlTypeName.NULL) { + return ASTBuilder.construct(HiveParser.TOK_NULL, "TOK_NULL").node(); + } + if (fieldType.getSqlTypeName() == SqlTypeName.ROW) { - ASTBuilder namedStructCallNode = ASTBuilder.construct(HiveParser.TOK_FUNCTION, "TOK_FUNCTION"); - namedStructCallNode.add(HiveParser.Identifier, "named_struct"); + ASTBuilder columnListNode = ASTBuilder.construct(HiveParser.TOK_TABCOLLIST, "TOK_TABCOLLIST"); for (RelDataTypeField structFieldType : fieldType.getFieldList()) { - namedStructCallNode.add(HiveParser.Identifier, structFieldType.getName()); - namedStructCallNode.add(createNullField(structFieldType.getType())); + ASTNode colNode = ASTBuilder.construct(HiveParser.TOK_TABCOL, "TOK_TABCOL") + .add(HiveParser.Identifier, structFieldType.getName()) + .add(convertType(structFieldType.getType())) + .node(); + columnListNode.add(colNode); } - return namedStructCallNode.node(); + return ASTBuilder.construct(HiveParser.TOK_STRUCT, "TOK_STRUCT").add(columnListNode).node(); } if (fieldType.getSqlTypeName() == SqlTypeName.MAP) { - ASTBuilder mapCallNode = ASTBuilder.construct(HiveParser.TOK_FUNCTION, "TOK_FUNCTION"); - mapCallNode.add(HiveParser.Identifier, "map"); - mapCallNode.add(createNullField(fieldType.getKeyType())); - mapCallNode.add(createNullField(fieldType.getValueType())); + ASTBuilder mapCallNode = ASTBuilder.construct(HiveParser.TOK_MAP, "TOK_MAP"); + mapCallNode.add(convertType(fieldType.getKeyType())); + mapCallNode.add(convertType(fieldType.getValueType())); return mapCallNode.node(); } if (fieldType.getSqlTypeName() == SqlTypeName.ARRAY) { - ASTBuilder arrayCallNode = ASTBuilder.construct(HiveParser.TOK_FUNCTION, "TOK_FUNCTION"); - arrayCallNode.add(HiveParser.Identifier, "array"); - arrayCallNode.add(createNullField(fieldType.getComponentType())); + ASTBuilder arrayCallNode = ASTBuilder.construct(HiveParser.TOK_LIST, "TOK_LIST"); + arrayCallNode.add(convertType(fieldType.getComponentType())); return arrayCallNode.node(); } - return createCastNull(fieldType); - } - - private static ASTNode createCastNull(RelDataType fieldType) { HiveToken ht = TypeConverter.hiveToken(fieldType); - ASTNode typeNode; - if (ht == null) { - typeNode = ASTBuilder.construct( - HiveParser.Identifier, fieldType.getSqlTypeName().getName().toLowerCase()).node(); - } else { - ASTBuilder typeNodeBuilder = ASTBuilder.construct(ht.type, ht.text); - if (ht.args != null) { - for (String castArg : ht.args) { - typeNodeBuilder.add(HiveParser.Identifier, castArg); - } + ASTBuilder astBldr = ASTBuilder.construct(ht.type, ht.text); + if (ht.args != null) { + for (String castArg : ht.args) { + astBldr.add(HiveParser.Identifier, castArg); } - typeNode = typeNodeBuilder.node(); } - return ASTBuilder.construct(HiveParser.TOK_FUNCTION, "TOK_FUNCTION") - .add(typeNode) - .add(HiveParser.TOK_NULL, "TOK_NULL") - .node(); + + return astBldr.node(); } private ASTNode convert() throws CalciteSemanticException { @@ -1042,22 +1039,7 @@ public ASTNode visitCall(RexCall call) { Collections.singletonList(SqlFunctionConverter.buildAST(SqlStdOperatorTable.IS_NOT_DISTINCT_FROM, astNodeLst, 
call.getType())), call.getType()); case CAST: assert(call.getOperands().size() == 1); - if (call.getType().isStruct() || - SqlTypeName.MAP.equals(call.getType().getSqlTypeName()) || - SqlTypeName.ARRAY.equals(call.getType().getSqlTypeName())) { - // cast for complex types can be ignored safely because explicit casting on such - // types are not possible, implicit casting e.g. CAST(ROW__ID as <...>) can be ignored - return call.getOperands().get(0).accept(this); - } - - HiveToken ht = TypeConverter.hiveToken(call.getType()); - ASTBuilder astBldr = ASTBuilder.construct(ht.type, ht.text); - if (ht.args != null) { - for (String castArg : ht.args) { - astBldr.add(HiveParser.Identifier, castArg); - } - } - astNodeLst.add(astBldr.node()); + astNodeLst.add(convertType(call.getType())); astNodeLst.add(call.getOperands().get(0).accept(this)); break; case EXTRACT: diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java index 89d6024cc06e..e6c41c3b6206 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/RexNodeConverter.java @@ -76,11 +76,14 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFCase; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFIn; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFTimestamp; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToArray; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToBinary; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToChar; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDate; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToDecimal; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToMap; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToString; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToStruct; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToTimestampLocalTZ; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToUnixTimeStamp; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFToVarchar; @@ -334,7 +337,10 @@ public static RexNode handleExplicitCast(GenericUDF udf, RelDataType returnType, || (udf instanceof GenericUDFToString) || (udf instanceof GenericUDFToDecimal) || (udf instanceof GenericUDFToDate) || (udf instanceof GenericUDFTimestamp) || (udf instanceof GenericUDFToTimestampLocalTZ) - || (udf instanceof GenericUDFToBinary) || castExprUsingUDFBridge(udf)) { + || (udf instanceof GenericUDFToBinary) || castExprUsingUDFBridge(udf) + || (udf instanceof GenericUDFToMap) + || (udf instanceof GenericUDFToArray) + || (udf instanceof GenericUDFToStruct)) { castExpr = rexBuilder.makeAbstractCast(returnType, childRexNodeLst.get(0)); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveGBOpConvUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveGBOpConvUtil.java index 7a363aafd1fc..a215dec11485 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveGBOpConvUtil.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/calcite/translator/opconventer/HiveGBOpConvUtil.java @@ -133,8 +133,8 @@ private static class GBInfo { private static HIVEGBPHYSICALMODE getAggOPMode(HiveConf hc, GBInfo gbInfo) { HIVEGBPHYSICALMODE gbPhysicalPipelineMode = 
HIVEGBPHYSICALMODE.MAP_SIDE_GB_NO_SKEW_NO_ADD_MR_JOB; - if (hc.getBoolVar(HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE)) { - if (!hc.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + if (hc.getBoolVar(HiveConf.ConfVars.HIVE_MAPSIDE_AGGREGATE)) { + if (!hc.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { if (!gbInfo.grpSetRqrAdditionalMRJob) { gbPhysicalPipelineMode = HIVEGBPHYSICALMODE.MAP_SIDE_GB_NO_SKEW_NO_ADD_MR_JOB; } else { @@ -148,7 +148,7 @@ private static HIVEGBPHYSICALMODE getAggOPMode(HiveConf hc, GBInfo gbInfo) { } } } else { - if (!hc.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + if (!hc.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { gbPhysicalPipelineMode = HIVEGBPHYSICALMODE.NO_MAP_SIDE_GB_NO_SKEW; } else { gbPhysicalPipelineMode = HIVEGBPHYSICALMODE.NO_MAP_SIDE_GB_SKEW; @@ -283,11 +283,11 @@ private static GBInfo getGBInfo(HiveAggregate aggRel, OpAttr inputOpAf, HiveConf } // 4. Gather GB Memory threshold - gbInfo.groupByMemoryUsage = HiveConf.getFloatVar(hc, HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); - gbInfo.memoryThreshold = HiveConf.getFloatVar(hc, HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); - gbInfo.minReductionHashAggr = HiveConf.getFloatVar(hc, HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION); + gbInfo.groupByMemoryUsage = HiveConf.getFloatVar(hc, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); + gbInfo.memoryThreshold = HiveConf.getFloatVar(hc, HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); + gbInfo.minReductionHashAggr = HiveConf.getFloatVar(hc, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION); gbInfo.minReductionHashAggrLowerBound = - HiveConf.getFloatVar(hc, HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND); + HiveConf.getFloatVar(hc, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND); // 5. Gather GB Physical pipeline (based on user config & Grping Sets size) gbInfo.gbPhysicalPipelineMode = getAggOPMode(hc, gbInfo); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/AbstractCorrelationProcCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/AbstractCorrelationProcCtx.java index 5ca64fc4b6f6..a699f58b8f54 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/AbstractCorrelationProcCtx.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/AbstractCorrelationProcCtx.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hive.ql.optimizer.correlation; -import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVEOPTREDUCEDEDUPLICATIONMINREDUCER; -import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESCRIPTOPERATORTRUST; -import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE; +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_OPT_REDUCE_DEDUPLICATION_MIN_REDUCER; +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_SCRIPT_OPERATOR_TRUST; +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_MAPSIDE_AGGREGATE; import java.util.HashSet; import java.util.Set; @@ -44,9 +44,9 @@ abstract class AbstractCorrelationProcCtx implements NodeProcessorCtx { public AbstractCorrelationProcCtx(ParseContext pctx) { removedOps = new HashSet>(); - trustScript = pctx.getConf().getBoolVar(HIVESCRIPTOPERATORTRUST); - minReducer = pctx.getConf().getIntVar(HIVEOPTREDUCEDEDUPLICATIONMINREDUCER); - isMapAggr = pctx.getConf().getBoolVar(HIVEMAPSIDEAGGREGATE); + trustScript = pctx.getConf().getBoolVar(HIVE_SCRIPT_OPERATOR_TRUST); + minReducer = pctx.getConf().getIntVar(HIVE_OPT_REDUCE_DEDUPLICATION_MIN_REDUCER); + 
isMapAggr = pctx.getConf().getBoolVar(HIVE_MAPSIDE_AGGREGATE); this.pctx = pctx; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java index 43870562a3d7..19a2295a0c10 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/CorrelationOptimizer.java @@ -178,7 +178,7 @@ private void findPossibleAutoConvertedJoinOperators() throws SemanticException { } long ThresholdOfSmallTblSizeSum = HiveConf.getLongVar(pCtx.getConf(), - HiveConf.ConfVars.HIVESMALLTABLESFILESIZE); + HiveConf.ConfVars.HIVE_SMALL_TABLES_FILESIZE); for (int i = 0; i < numAliases; i++) { // this table cannot be big table if (!bigTableCandidates.contains(i)) { @@ -212,7 +212,7 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { pCtx = pctx; - if (HiveConf.getBoolVar(pCtx.getConf(),HiveConf.ConfVars.HIVECONVERTJOIN)) { + if (HiveConf.getBoolVar(pCtx.getConf(),HiveConf.ConfVars.HIVE_CONVERT_JOIN)) { findPossibleAutoConvertedJoinOperators(); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java index dc5c97d5a32f..51af5b7cd161 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplication.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hive.ql.optimizer.correlation; -import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVECONVERTJOIN; -import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASK; +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_CONVERT_JOIN; +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONALTASK; import java.util.ArrayList; import java.util.LinkedHashMap; @@ -73,10 +73,10 @@ public ParseContext transform(ParseContext pctx) throws SemanticException { ReduceSinkDeduplicateProcCtx cppCtx = new ReduceSinkDeduplicateProcCtx(pGraphContext); // for auto convert map-joins, it not safe to dedup in here (todo) - boolean mergeJoins = !pctx.getConf().getBoolVar(HIVECONVERTJOIN) && - !pctx.getConf().getBoolVar(HIVECONVERTJOINNOCONDITIONALTASK) && + boolean mergeJoins = !pctx.getConf().getBoolVar(HIVE_CONVERT_JOIN) && + !pctx.getConf().getBoolVar(HIVE_CONVERT_JOIN_NOCONDITIONALTASK) && !pctx.getConf().getBoolVar(ConfVars.HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ) && - !pctx.getConf().getBoolVar(ConfVars.HIVEDYNAMICPARTITIONHASHJOIN); + !pctx.getConf().getBoolVar(ConfVars.HIVE_DYNAMIC_PARTITION_HASHJOIN); // If multiple rules can be matched with same cost, last rule will be choosen as a processor // see DefaultRuleDispatcher#dispatch() @@ -329,7 +329,7 @@ public Object process(ReduceSinkOperator cRS, GroupByOperator cGBY, start, ReduceSinkOperator.class, dedupCtx.trustScript()); if (pRS != null && ReduceSinkDeDuplicationUtils .merge(dedupCtx.getPctx().getConf(), cRS, pRS, dedupCtx.minReducer())) { - if (dedupCtx.getPctx().getConf().getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + if (dedupCtx.getPctx().getConf().getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { return false; } CorrelationUtilities.removeReduceSinkForGroupBy(cRS, cGBY, dedupCtx.getPctx(), dedupCtx); diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplicationUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplicationUtils.java index 9327c643d768..27e56dfead17 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplicationUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/ReduceSinkDeDuplicationUtils.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hive.ql.optimizer.correlation; -import java.lang.reflect.Field; import java.util.ArrayList; import java.util.List; import java.util.Map.Entry; @@ -33,14 +32,12 @@ import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.optimizer.correlation.ReduceSinkDeDuplication.ReduceSinkDeduplicateProcCtx; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.ColStatistics; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDescUtils; import org.apache.hadoop.hive.ql.plan.OperatorDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils; import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; -import org.apache.hadoop.hive.ql.plan.Statistics; import org.apache.hadoop.hive.ql.plan.TableDesc; import com.google.common.collect.ImmutableList; @@ -149,7 +146,7 @@ public static boolean merge(HiveConf hiveConf, ReduceSinkOperator cRS, ReduceSin // child RS but Sorting order of the child RS is more specific than // that of the parent RS. throw new SemanticException("Sorting columns and order don't match. " + - "Try set " + HiveConf.ConfVars.HIVEOPTREDUCEDEDUPLICATION + "=false;"); + "Try set " + HiveConf.ConfVars.HIVE_OPT_REDUCE_DEDUPLICATION + "=false;"); } pRS.getConf().setOrder(cRS.getConf().getOrder()); pRS.getConf().setNullOrder(cRS.getConf().getNullOrder()); @@ -210,7 +207,7 @@ private static long estimateReducers(HiveConf conf, ReduceSinkOperator rs) { if (rs.getConf().getNumReducers() > 0) { return rs.getConf().getNumReducers(); } - int constantReducers = conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS); + int constantReducers = conf.getIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS); if (constantReducers > 0) { return constantReducers; } @@ -221,8 +218,8 @@ private static long estimateReducers(HiveConf conf, ReduceSinkOperator rs) { inputTotalBytes = StatsUtils.safeAdd(inputTotalBytes, sibling.getStatistics().getDataSize()); } } - int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS); - long bytesPerReducer = conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER); + int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAX_REDUCERS); + long bytesPerReducer = conf.getLongVar(HiveConf.ConfVars.BYTES_PER_REDUCER); return Utilities.estimateReducers(inputTotalBytes, bytesPerReducer, maxReducers, false); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java index 28e8d12dcead..86dcb10b557c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/CommonJoinTaskDispatcher.java @@ -143,7 +143,7 @@ private long calculateLocalTableTotalSize(MapredLocalWork localWork) { * Check if the total size of local tables will be under * the limit after we merge localWork1 and localWork2. 
* The limit of the total size of local tables is defined by - * HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD. + * HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD. * @param conf * @param localWorks * @return @@ -152,7 +152,7 @@ private boolean isLocalTableTotalSizeUnderLimitAfterMerge( Configuration conf, MapredLocalWork... localWorks) { final long localTableTotalSizeLimit = HiveConf.getLongVar(conf, - HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD); + HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD); long localTableTotalSize = 0; for (int i = 0; i < localWorks.length; i++) { final long localWorkTableTotalSize = calculateLocalTableTotalSize(localWorks[i]); @@ -166,7 +166,7 @@ private boolean isLocalTableTotalSizeUnderLimitAfterMerge( if (localTableTotalSize > localTableTotalSizeLimit) { // The total size of local tables after we merge localWorks // is larger than the limit set by - // HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD. + // HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD. return false; } @@ -431,12 +431,12 @@ public Task processCurrentTask(MapRedTask currTask, // If sizes of at least n-1 tables in a n-way join is known, and their sum is smaller than // the threshold size, convert the join into map-join and don't create a conditional task boolean convertJoinMapJoin = HiveConf.getBoolVar(conf, - HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASK); + HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONALTASK); int bigTablePosition = -1; if (convertJoinMapJoin) { // This is the threshold that the user has specified to fit in mapjoin long mapJoinSize = HiveConf.getLongVar(conf, - HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD); + HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD); Long bigTableSize = null; Set aliases = aliasToWork.keySet(); @@ -480,7 +480,7 @@ public Task processCurrentTask(MapRedTask currTask, } long ThresholdOfSmallTblSizeSum = HiveConf.getLongVar(conf, - HiveConf.ConfVars.HIVESMALLTABLESFILESIZE); + HiveConf.ConfVars.HIVE_SMALL_TABLES_FILESIZE); for (int pos = 0; pos < joinOp.getNumParent(); pos++) { // this table cannot be big table if (!bigTableCandidates.contains(pos)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java index 9c9dac07a671..d846428f78ae 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java @@ -147,7 +147,7 @@ public static void processSkewJoin(JoinOperator joinOp, joinDescriptor.setBigKeysDirMap(bigKeysDirMap); joinDescriptor.setSmallKeysDirMap(smallKeysDirMap); joinDescriptor.setSkewKeyDefinition(HiveConf.getIntVar(parseCtx.getConf(), - HiveConf.ConfVars.HIVESKEWJOINKEY)); + HiveConf.ConfVars.HIVE_SKEWJOIN_KEY)); HashMap> bigKeysDirToTaskMap = new HashMap>(); @@ -323,9 +323,9 @@ public static void processSkewJoin(JoinOperator joinOp, GenMRSkewJoinProcessor.class); newPlan.setNumMapTasks(HiveConf - .getIntVar(jc, HiveConf.ConfVars.HIVESKEWJOINMAPJOINNUMMAPTASK)); + .getIntVar(jc, HiveConf.ConfVars.HIVE_SKEWJOIN_MAPJOIN_NUM_MAP_TASK)); newPlan - .setMinSplitSize(HiveConf.getLongVar(jc, HiveConf.ConfVars.HIVESKEWJOINMAPJOINMINSPLIT)); + .setMinSplitSize(HiveConf.getLongVar(jc, HiveConf.ConfVars.HIVE_SKEWJOIN_MAPJOIN_MIN_SPLIT)); 
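The hunks above, like most of the remaining ones in this part of the patch, are mechanical renames of HiveConf.ConfVars constants. A small sketch, not part of the patch, of why this is transparent to users: the enum's varname, i.e. the property key read from hive-site.xml, does not change, so only Java call sites need to move to the new names. The concrete key mentioned in the comment is recalled from memory rather than taken from this diff.

```java
import org.apache.hadoop.hive.conf.HiveConf;

// The rename only changes the Java identifier; the user-facing property key
// stored in ConfVars.varname is the same string before and after the patch.
public class ConfVarRenameSketch {
  public static void main(String[] args) {
    System.out.println(HiveConf.ConfVars.HIVE_SMALL_TABLES_FILESIZE.varname);
    System.out.println(HiveConf.ConfVars.HIVE_SKEWJOIN_KEY.varname);
    // Both print the long-standing hive.* keys (for example something like
    // "hive.mapjoin.smalltable.filesize"), so existing hive-site.xml files keep working.
  }
}
```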
newPlan.setInputformat(HiveInputFormat.class.getName()); MapredWork w = new MapredWork(); @@ -366,7 +366,7 @@ public static void processSkewJoin(JoinOperator joinOp, public static boolean skewJoinEnabled(HiveConf conf, JoinOperator joinOp) { - if (conf != null && !conf.getBoolVar(HiveConf.ConfVars.HIVESKEWJOIN)) { + if (conf != null && !conf.getBoolVar(HiveConf.ConfVars.HIVE_SKEW_JOIN)) { return false; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java index 666da19e5813..2fef7f570b15 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LlapDecider.java @@ -173,7 +173,7 @@ private void adjustAutoParallelism(BaseWork work) { clusterState.initClusterInfo(); final int targetCount; final int executorCount; - final int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS); + final int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAX_REDUCERS); if (!clusterState.hasClusterInfo()) { LOG.warn("Cannot determine LLAP cluster information"); executorCount = executorsPerNode; // assume 1 node @@ -190,7 +190,7 @@ private void adjustAutoParallelism(BaseWork work) { if (newMin < reduceWork.getMaxReduceTasks()) { reduceWork.setMinReduceTasks(newMin); reduceWork.getEdgePropRef().setAutoReduce(conf, true, newMin, - reduceWork.getMaxReduceTasks(), conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER), + reduceWork.getMaxReduceTasks(), conf.getLongVar(HiveConf.ConfVars.BYTES_PER_REDUCER), reduceWork.getMinSrcFraction(), reduceWork.getMaxSrcFraction()); } else { reduceWork.setAutoReduceParallelism(false); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LocalMapJoinProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LocalMapJoinProcFactory.java index ca840d7f4eb4..9d8985fe62ec 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LocalMapJoinProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/LocalMapJoinProcFactory.java @@ -96,7 +96,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object.. context.setFollowedByGroupBy(true); GroupByOperator groupByOp = (GroupByOperator) nd; float groupByMemoryUsage = context.getParseCtx().getConf().getFloatVar( - HiveConf.ConfVars.HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY); + HiveConf.ConfVars.HIVE_MAPJOIN_FOLLOWEDBY_MAP_AGGR_HASH_MEMORY); groupByOp.getConf().setGroupByMemoryUsage(groupByMemoryUsage); return null; } @@ -130,10 +130,10 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object.. float hashtableMemoryUsage; if (context.isFollowedByGroupBy()) { hashtableMemoryUsage = conf.getFloatVar( - HiveConf.ConfVars.HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE); + HiveConf.ConfVars.HIVE_HASHTABLE_FOLLOWBY_GBY_MAX_MEMORY_USAGE); } else { hashtableMemoryUsage = conf.getFloatVar( - HiveConf.ConfVars.HIVEHASHTABLEMAXMEMORYUSAGE); + HiveConf.ConfVars.HIVE_HASHTABLE_MAX_MEMORY_USAGE); } mapJoinDesc.setHashTableMemoryUsage(hashtableMemoryUsage); LOG.info("Setting max memory usage to " + hashtableMemoryUsage + " for table sink " @@ -148,7 +148,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, Object.. 
// todo: support tez/vectorization boolean useNontaged = conf.getBoolVar( - HiveConf.ConfVars.HIVECONVERTJOINUSENONSTAGED) && + HiveConf.ConfVars.HIVE_CONVERT_JOIN_USE_NONSTAGED) && conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("mr") && !conf.getBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java index 8e51417f12c6..14f322e55a95 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/MemoryDecider.java @@ -73,9 +73,9 @@ public class MemoryCalculator implements SemanticDispatcher { public MemoryCalculator(PhysicalContext pctx) { this.pctx = pctx; - this.totalAvailableMemory = HiveConf.getLongVar(pctx.conf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD); - this.minimumHashTableSize = HiveConf.getIntVar(pctx.conf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS) - * HiveConf.getIntVar(pctx.conf, HiveConf.ConfVars.HIVEHYBRIDGRACEHASHJOINMINWBSIZE); + this.totalAvailableMemory = HiveConf.getLongVar(pctx.conf, HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD); + this.minimumHashTableSize = HiveConf.getIntVar(pctx.conf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_MIN_NUM_PARTITIONS) + * HiveConf.getIntVar(pctx.conf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_MIN_WB_SIZE); this.inflationFactor = HiveConf.getFloatVar(pctx.conf, HiveConf.ConfVars.HIVE_HASH_TABLE_INFLATION_FACTOR); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java index 8903eb738107..0ed3b3589381 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/PhysicalOptimizer.java @@ -22,7 +22,6 @@ import java.util.List; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; /** @@ -46,7 +45,7 @@ public PhysicalOptimizer(PhysicalContext pctx, HiveConf hiveConf) { */ private void initialize(HiveConf hiveConf) { resolvers = new ArrayList(); - if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVECONVERTJOIN)) { + if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN)) { resolvers.add(new CommonJoinResolver()); // The joins have been automatically converted to map-joins. 
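For readability, here is the arithmetic the patched MemoryCalculator constructor performs with the renamed settings, lifted out of the planner into a standalone sketch. The expressions mirror the hunk above; the explicit long cast is added here only to make the illustration overflow-safe and is not part of the patch.

```java
import org.apache.hadoop.hive.conf.HiveConf;

// Standalone view of the two bounds MemoryCalculator reads: the total budget comes
// from the map-join conversion threshold, and the floor for a hybrid grace hash join
// is min-number-of-partitions times min-writebuffer-size.
public class MemoryDeciderBoundsSketch {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();
    long totalAvailableMemory =
        HiveConf.getLongVar(conf, HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD);
    long minimumHashTableSize =
        (long) HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_MIN_NUM_PARTITIONS)
            * HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_HYBRIDGRACE_HASHJOIN_MIN_WB_SIZE);
    System.out.println("join conversion budget: " + totalAvailableMemory
        + " bytes, minimum hash table size: " + minimumHashTableSize + " bytes");
  }
}
```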
@@ -56,18 +55,18 @@ private void initialize(HiveConf hiveConf) { resolvers.add(new SortMergeJoinResolver()); } } - if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVESKEWJOIN)) { + if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SKEW_JOIN)) { resolvers.add(new SkewJoinResolver()); } resolvers.add(new MapJoinResolver()); - if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVEMETADATAONLYQUERIES)) { + if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_METADATA_ONLY_QUERIES)) { resolvers.add(new MetadataOnlyOptimizer()); } - if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVENULLSCANOPTIMIZE)) { + if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_NULL_SCAN_OPTIMIZE)) { resolvers.add(new NullScanOptimizer()); } - if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVESAMPLINGFORORDERBY)) { + if (hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SAMPLING_FOR_ORDERBY)) { resolvers.add(new SamplingOptimizer()); } @@ -91,7 +90,7 @@ private void initialize(HiveConf hiveConf) { "enable")) { resolvers.add(new Vectorizer()); } - if (!"none".equalsIgnoreCase(hiveConf.getVar(HiveConf.ConfVars.HIVESTAGEIDREARRANGE))) { + if (!"none".equalsIgnoreCase(hiveConf.getVar(HiveConf.ConfVars.HIVE_STAGE_ID_REARRANGE))) { resolvers.add(new StageIDsRearranger()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java index 54373effe23f..d67c5d720388 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SortMergeJoinTaskDispatcher.java @@ -274,7 +274,7 @@ public Task processCurrentTask(MapRedTask currTask, pathToAliases, aliasToSize); long ThresholdOfSmallTblSizeSum = HiveConf.getLongVar(conf, - HiveConf.ConfVars.HIVESMALLTABLESFILESIZE); + HiveConf.ConfVars.HIVE_SMALL_TABLES_FILESIZE); for (int bigTablePosition = 0; bigTablePosition < numAliases; bigTablePosition++) { // this table cannot be big table diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/StageIDsRearranger.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/StageIDsRearranger.java index 02bbf6a99511..51a79650e602 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/StageIDsRearranger.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/StageIDsRearranger.java @@ -52,7 +52,7 @@ public PhysicalContext resolve(PhysicalContext pctx) throws SemanticException { } private static List getExplainOrder(PhysicalContext pctx) { - List tasks = getExplainOrder(pctx.getRootTasks(), pctx.getConf().getVar(HiveConf.ConfVars.HIVESTAGEIDREARRANGE)); + List tasks = getExplainOrder(pctx.getRootTasks(), pctx.getConf().getVar(HiveConf.ConfVars.HIVE_STAGE_ID_REARRANGE)); if (pctx.getFetchTask() != null) { tasks.add(pctx.getFetchTask()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java index ce2e31d58800..a33e5627b4bd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java @@ -3912,7 +3912,7 @@ private boolean canSpecializeMapJoin(Operator op, MapJoi vectorMapJoinInfo.setBigTableFilterExpressions(bigTableFilterExpressions); boolean useOptimizedTable = - HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE); + HiveConf.getBoolVar(hiveConf, 
HiveConf.ConfVars.HIVE_MAPJOIN_USE_OPTIMIZED_TABLE); // Remember the condition variables for EXPLAIN regardless of whether we specialize or not. vectorDesc.setVectorMapJoinInfo(vectorMapJoinInfo); @@ -5330,7 +5330,7 @@ public Operator validateAndVectorizeOperator(Operator partitio List partNames = Hive.get().getPartitionNames( tab.getDbName(), tab.getTableName(), (short) -1); - String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME); List partCols = extractPartColNames(tab); List partColTypeInfos = extractPartColTypes(tab); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java index d7744587e689..79ca3f6220da 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java @@ -169,8 +169,13 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Table table = tsop.getConf().getTableMetadata(); try { - // gather statistics for the first time and the attach it to table scan operator - Statistics stats = StatsUtils.collectStatistics(aspCtx.getConf(), partList, colStatsCached, table, tsop); + Statistics stats; + if (table.isMaterializedTable()) { + stats = tsop.getStatistics(); + } else { + // gather statistics for the first time and attach it to table scan operator + stats = StatsUtils.collectStatistics(aspCtx.getConf(), partList, colStatsCached, table, tsop); + } stats = applyRuntimeStats(aspCtx.getParseContext().getContext(), stats, tsop); tsop.setStatistics(stats); @@ -1528,7 +1533,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, AnnotateStatsProcCtx aspCtx = (AnnotateStatsProcCtx) procCtx; HiveConf conf = aspCtx.getConf(); - long maxSplitSize = HiveConf.getLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE); + long maxSplitSize = HiveConf.getLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE); List aggDesc = gop.getConf().getAggregators(); Map colExprMap = gop.getColumnExprMap(); RowSchema rs = gop.getSchema(); @@ -1577,7 +1582,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // be updated to bytes per reducer (1GB default) if (top == null) { inputSize = parentStats.getDataSize(); - maxSplitSize = HiveConf.getLongVar(conf, HiveConf.ConfVars.BYTESPERREDUCER); + maxSplitSize = HiveConf.getLongVar(conf, HiveConf.ConfVars.BYTES_PER_REDUCER); } else { inputSize = top.getConf().getStatistics().getDataSize(); } @@ -1875,7 +1880,7 @@ private long getParentNumRows(GroupByOperator op, List gbyKeys, Hi /** * This method does not take into account many configs used at runtime to - * disable hash aggregation like HIVEMAPAGGRHASHMINREDUCTION. This method + * disable hash aggregation like HIVE_MAP_AGGR_HASH_MIN_REDUCTION. This method * roughly estimates the number of rows and size of each row to see if it * can fit in hashtable for aggregation. 
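The Javadoc above notes that the check only roughly estimates whether the aggregation hash table fits in memory. A back-of-the-envelope version of that reasoning follows, with made-up numbers: the container size, bytes per entry, and group count are all hypothetical; only the "fraction of container memory" idea comes from the patched code.

```java
// Back-of-the-envelope fit check for map-side hash aggregation (all numbers hypothetical).
public class HashAggFitSketch {
  public static void main(String[] args) {
    long containerMemoryBytes = 4L * 1024 * 1024 * 1024; // assume a 4 GB map task container
    float hashAggMem = 0.5f;            // assumed value of the HIVE_MAP_AGGR_HASH_MEMORY fraction
    long hashTableBudget = (long) (containerMemoryBytes * hashAggMem);

    long estimatedGroups = 10_000_000L; // hypothetical distinct-key estimate
    long avgEntryBytes = 128L;          // hypothetical bytes per hash table entry
    long needed = estimatedGroups * avgEntryBytes;

    System.out.println(needed <= hashTableBudget
        ? "map-side hash aggregation is assumed to fit"
        : "planner assumes runtime would fall back and disable hash aggregation");
  }
}
```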
* @param gop - group by operator @@ -1891,8 +1896,8 @@ private boolean checkMapSideAggregation(GroupByOperator gop, GroupByDesc.Mode mode = desc.getMode(); if (mode.equals(GroupByDesc.Mode.HASH)) { - float hashAggMem = conf.getFloatVar(HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); - float hashAggMaxThreshold = conf.getFloatVar(HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + float hashAggMem = conf.getFloatVar(HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); + float hashAggMaxThreshold = conf.getFloatVar(HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); // get available map memory in bytes long totalMemory = DagUtils.getContainerResource(conf).getMemorySize() * 1024L * 1024L; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java index bc36832b6d1a..54b6587ba99b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java @@ -1197,9 +1197,9 @@ public TableSpec(Hive db, HiveConf conf, ASTNode ast, boolean allowDynamicPartit try { // get table metadata tableName = HiveTableName.withNoDefault(getUnescapedName((ASTNode)ast.getChild(0))); - boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE); + boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE); if (testMode) { - tableName = TableName.fromString(String.join("", conf.getVar(HiveConf.ConfVars.HIVETESTMODEPREFIX), + tableName = TableName.fromString(String.join("", conf.getVar(HiveConf.ConfVars.HIVE_TEST_MODE_PREFIX), tableName.getTable()), tableName.getCat(), tableName.getDb()); // not that elegant, but hard to refactor } if (ast.getToken().getType() != HiveParser.TOK_CREATETABLE && @@ -1274,7 +1274,7 @@ public TableSpec(Hive db, HiveConf conf, ASTNode ast, boolean allowDynamicPartit numStaPart = parts.size() - numDynParts; } if (numStaPart == 0 && - conf.getVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE).equalsIgnoreCase("strict")) { + conf.getVar(HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE).equalsIgnoreCase("strict")) { throw new SemanticException(ErrorMsg.DYNAMIC_PARTITION_STRICT_MODE.getMsg()); } @@ -1613,7 +1613,7 @@ private static boolean getPartExprNodeDesc(ASTNode astNode, HiveConf conf, } TypeCheckCtx typeCheckCtx = new TypeCheckCtx(null); - String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULT_PARTITION_NAME); boolean result = true; for (Node childNode : astNode.getChildren()) { ASTNode childASTNode = (ASTNode)childNode; @@ -1803,8 +1803,7 @@ private static String normalizeDateCol(Object colValue) throws SemanticException throw new SemanticException("Unexpected date type " + colValue.getClass()); } try { - return MetaStoreUtils.convertDateToString( - MetaStoreUtils.convertStringToDate(value.toString())); + return MetaStoreUtils.normalizeDate(value.toString()); } catch (Exception e) { throw new SemanticException(e); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CBOPlan.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CBOPlan.java index 4230ef07b4a6..53beeeb6df72 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CBOPlan.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CBOPlan.java @@ -19,6 +19,9 @@ package org.apache.hadoop.hive.ql.parse; import org.apache.calcite.rel.RelNode; +import org.apache.hadoop.hive.ql.metadata.RewriteAlgorithm; + +import java.util.Set; /** * Wrapper 
of Calcite plan. @@ -26,12 +29,12 @@ public class CBOPlan { private final ASTNode ast; private final RelNode plan; - private final String invalidAutomaticRewritingMaterializationReason; + private final Set supportedRewriteAlgorithms; - public CBOPlan(ASTNode ast, RelNode plan, String invalidAutomaticRewritingMaterializationReason) { + public CBOPlan(ASTNode ast, RelNode plan, Set supportedRewriteAlgorithms) { this.ast = ast; this.plan = plan; - this.invalidAutomaticRewritingMaterializationReason = invalidAutomaticRewritingMaterializationReason; + this.supportedRewriteAlgorithms = supportedRewriteAlgorithms; } public ASTNode getAst() { @@ -52,7 +55,7 @@ public RelNode getPlan() { * Null or empty string otherwise. * @return String contains error message or null. */ - public String getInvalidAutomaticRewritingMaterializationReason() { - return invalidAutomaticRewritingMaterializationReason; + public Set getSupportedRewriteAlgorithms() { + return supportedRewriteAlgorithms; } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java index b105de8174ee..40d9167c74f7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java @@ -355,7 +355,7 @@ import static org.apache.commons.lang3.StringUtils.isBlank; import static org.apache.hadoop.hive.ql.optimizer.calcite.HiveMaterializedViewASTSubQueryRewriteShuttle.getMaterializedViewByAST; -import static org.apache.hadoop.hive.ql.metadata.HiveRelOptMaterialization.RewriteAlgorithm.ANY; +import static org.apache.hadoop.hive.ql.metadata.RewriteAlgorithm.ANY; public class CalcitePlanner extends SemanticAnalyzer { @@ -506,9 +506,9 @@ public static RelOptPlanner createPlanner(HiveConf conf) { private static RelOptPlanner createPlanner( HiveConf conf, StatsSource statsSource, boolean isExplainPlan) { final Double maxSplitSize = (double) HiveConf.getLongVar( - conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE); + conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE); final Double maxMemory = (double) HiveConf.getLongVar( - conf, HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD); + conf, HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD); HiveAlgorithmsConf algorithmsConf = new HiveAlgorithmsConf(maxSplitSize, maxMemory); HiveRulesRegistry registry = new HiveRulesRegistry(); Properties calciteConfigProperties = new Properties(); @@ -1071,9 +1071,7 @@ Table materializeCTE(String cteName, CTEClause cte) throws HiveException { LOG.info(cteName + " will be materialized into " + location); cte.source = analyzer; - ctx.addMaterializedTable(cteName, table); - // For CalcitePlanner, store qualified name too - ctx.addMaterializedTable(table.getFullyQualifiedName(), table); + ctx.addMaterializedTable(cteName, table, getMaterializedTableStats(analyzer.getSinkOp(), table)); return table; } @@ -1681,8 +1679,8 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu materializationValidator.validate(calcitePlan); setInvalidResultCacheReason( materializationValidator.getResultCacheInvalidReason()); - setInvalidAutomaticRewritingMaterializationReason( - materializationValidator.getAutomaticRewritingInvalidReason()); + setMaterializationValidationResult( + materializationValidator.getAutomaticRewritingValidationResult()); // 2. 
Apply pre-join order optimizations calcitePlan = applyPreJoinOrderingTransforms(calcitePlan, mdProvider.getMetadataProvider(), executorProvider); @@ -1745,7 +1743,7 @@ protected RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataPr PerfLogger perfLogger = SessionState.getPerfLogger(); final int maxCNFNodeCount = conf.getIntVar(HiveConf.ConfVars.HIVE_CBO_CNF_NODES_LIMIT); - final int minNumORClauses = conf.getIntVar(HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZERMIN); + final int minNumORClauses = conf.getIntVar(HiveConf.ConfVars.HIVE_POINT_LOOKUP_OPTIMIZER_MIN); final boolean allowDisjunctivePredicates = conf.getBoolVar(ConfVars.HIVE_JOIN_DISJ_TRANSITIVE_PREDICATES_PUSHDOWN); final HepProgramBuilder program = new HepProgramBuilder(); @@ -1790,7 +1788,7 @@ protected RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataPr } // Run this optimization early, since it is expanding the operator pipeline. if (!conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("mr") && - conf.getBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEDISTINCTREWRITE)) { + conf.getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_DISTINCT_REWRITE)) { // Its not clear, if this rewrite is always performant on MR, since extra map phase // introduced for 2nd MR job may offset gains of this multi-stage aggregation. // We need a cost model for MR to enable this on MR. @@ -1809,7 +1807,7 @@ protected RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataPr // 3. Run exhaustive PPD, add not null filters, transitive inference, // constant propagation, constant folding List rules = Lists.newArrayList(); - if (conf.getBoolVar(HiveConf.ConfVars.HIVEOPTPPD_WINDOWING)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_OPT_PPD_WINDOWING)) { rules.add(HiveFilterProjectTransposeRule.DETERMINISTIC_WINDOWING); } else { rules.add(HiveFilterProjectTransposeRule.DETERMINISTIC); @@ -1836,7 +1834,7 @@ protected RelNode applyPreJoinOrderingTransforms(RelNode basePlan, RelMetadataPr rules.add(HiveReduceExpressionsRule.SEMIJOIN_INSTANCE); rules.add(HiveAggregateReduceFunctionsRule.INSTANCE); rules.add(HiveAggregateReduceRule.INSTANCE); - if (conf.getBoolVar(HiveConf.ConfVars.HIVEPOINTLOOKUPOPTIMIZER)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_POINT_LOOKUP_OPTIMIZER)) { rules.add(new HivePointLookupOptimizerRule.FilterCondition(minNumORClauses)); rules.add(new HivePointLookupOptimizerRule.JoinCondition(minNumORClauses)); rules.add(new HivePointLookupOptimizerRule.ProjectionExpressions(minNumORClauses)); @@ -2156,7 +2154,7 @@ private RelNode applyJoinOrderingTransform(RelNode basePlan, RelMetadataProvider rules.add(HiveJoinProjectTransposeRule.RIGHT_PROJECT_BTW_JOIN); rules.add(HiveProjectMergeRule.INSTANCE); if (profilesCBO.contains(ExtendedCBOProfile.REFERENTIAL_CONSTRAINTS)) { - rules.add(conf.getBoolVar(HiveConf.ConfVars.HIVEOPTPPD_WINDOWING) ? + rules.add(conf.getBoolVar(HiveConf.ConfVars.HIVE_OPT_PPD_WINDOWING) ? HiveFilterProjectTransposeRule.DETERMINISTIC_WINDOWING_ON_NON_FILTERING_JOIN : HiveFilterProjectTransposeRule.DETERMINISTIC_ON_NON_FILTERING_JOIN); rules.add(HiveFilterJoinRule.FILTER_ON_NON_FILTERING_JOIN); @@ -2899,7 +2897,7 @@ private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticExc // 2. 
if returnpath is on and hivetestmode is on bail if (qb.getParseInfo().getTabSample(tableAlias) != null || getNameToSplitSampleMap().containsKey(tableAlias) - || (conf.getBoolVar(HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP)) && (conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE)) ) { + || (conf.getBoolVar(HiveConf.ConfVars.HIVE_CBO_RETPATH_HIVEOP)) && (conf.getBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE)) ) { String msg = String.format("Table Sample specified for %s." + " Currently we don't support Table Sample clauses in CBO," + " turn off cbo for queries on tableSamples.", tableAlias); @@ -3663,16 +3661,16 @@ private RelNode genGBLogicalPlan(QB qb, RelNode srcRel) throws SemanticException || !qbp.getDestGroupingSets().isEmpty() || !qbp.getDestCubes().isEmpty()); // 2. Sanity check - if (conf.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW) + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW) && qbp.getDistinctFuncExprsForClause(destClauseName).size() > 1) { throw new SemanticException(ErrorMsg.UNSUPPORTED_MULTIPLE_DISTINCTS.getMsg()); } if (cubeRollupGrpSetPresent) { - if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE)) { + if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_MAPSIDE_AGGREGATE)) { throw new SemanticException(ErrorMsg.HIVE_GROUPING_SETS_AGGR_NOMAPAGGR.getMsg()); } - if (conf.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { if (qbp.getDestGroupingSets().size() > conf .getIntVar(HiveConf.ConfVars.HIVE_NEW_JOB_GROUPING_SET_CARDINALITY)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java index 5d4768000a96..7424d4e8ea86 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ColumnStatsSemanticAnalyzer.java @@ -123,7 +123,7 @@ private void handlePartialPartitionSpec(Map partSpec, ColumnStat partValsSpecified += partSpec.get(partKey) == null ? 
0 : 1; } try { - // for static partition, it may not exist when HIVESTATSCOLAUTOGATHER is + // for static partition, it may not exist when HIVE_STATS_COL_AUTOGATHER is // set to true if (context == null) { if ((partValsSpecified == tbl.getPartitionKeys().size()) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExecuteStatementAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExecuteStatementAnalyzer.java index 029ea483a7fe..535d8d855ed3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExecuteStatementAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExecuteStatementAnalyzer.java @@ -205,9 +205,9 @@ public void analyzeInternal(ASTNode root) throws SemanticException { this.prepareQuery = false; // reset config - String queryId = this.conf.getVar(HiveConf.ConfVars.HIVEQUERYID); + String queryId = this.conf.getVar(HiveConf.ConfVars.HIVE_QUERY_ID); this.conf.syncFromConf(cachedPlan.getQueryState().getConf()); - this.conf.setVar(HiveConf.ConfVars.HIVEQUERYID, queryId); + this.conf.setVar(HiveConf.ConfVars.HIVE_QUERY_ID, queryId); // set rest of the params this.inputs = cachedPlan.getInputs(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java index 4add301fb0ae..91cda5591981 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/EximUtil.java @@ -259,8 +259,8 @@ private EximUtil() { */ public static URI getValidatedURI(HiveConf conf, String dcPath) throws SemanticException { try { - boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE) - || conf.getBoolVar(HiveConf.ConfVars.HIVEEXIMTESTMODE); + boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE) + || conf.getBoolVar(HiveConf.ConfVars.HIVE_EXIM_TEST_MODE); URI uri = new Path(dcPath).toUri(); FileSystem fs = FileSystem.get(uri, conf); // Get scheme from FileSystem @@ -316,8 +316,8 @@ static void validateTable(org.apache.hadoop.hive.ql.metadata.Table table) throws public static String relativeToAbsolutePath(HiveConf conf, String location) throws SemanticException { try { - boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODE) - || conf.getBoolVar(HiveConf.ConfVars.HIVEEXIMTESTMODE);; + boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE) + || conf.getBoolVar(HiveConf.ConfVars.HIVE_EXIM_TEST_MODE);; if (testMode) { URI uri = new Path(location).toUri(); FileSystem fs = FileSystem.get(uri, conf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java index baa31c224ea8..ac0b3ead9f51 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java @@ -221,7 +221,7 @@ public void analyzeInternal(ASTNode ast) throws SemanticException { ctx.getCalcitePlan()); work.setAppendTaskType( - HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES)); + HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_EXPLAIN_DEPENDENCY_APPEND_TASK_TYPES)); ExplainTask explTask = (ExplainTask) TaskFactory.get(work); @@ -282,4 +282,11 @@ public boolean skipAuthorization() { } return super.skipAuthorization(); } + + @Override + public void startAnalysis() { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_HMS_QUERY_CACHE_ENABLED)) { + queryState.createHMSCache(); + } + } } diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java index 6042c0982149..6688a4256f7c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezProcContext.java @@ -173,7 +173,7 @@ public GenTezProcContext(HiveConf conf, ParseContext parseContext, this.inputs = inputs; this.outputs = outputs; this.currentTask = (TezTask) TaskFactory.get( - new TezWork(conf.getVar(HiveConf.ConfVars.HIVEQUERYID), conf)); + new TezWork(conf.getVar(HiveConf.ConfVars.HIVE_QUERY_ID), conf)); this.leafOperatorToFollowingWork = new LinkedHashMap, BaseWork>(); this.linkOpWithWorkMap = new LinkedHashMap, Map>(); this.linkWorkWithReduceSinkMap = new LinkedHashMap>(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java index 1d2eab7e1449..a2512500e37b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezUtils.java @@ -46,7 +46,6 @@ import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.lib.*; -import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils; import org.apache.hadoop.hive.ql.plan.*; import org.apache.hadoop.hive.ql.plan.TezEdgeProperty.EdgeType; @@ -96,7 +95,7 @@ public static ReduceWork createReduceWork( float maxPartitionFactor = context.conf.getFloatVar(HiveConf.ConfVars.TEZ_MAX_PARTITION_FACTOR); float minPartitionFactor = context.conf.getFloatVar(HiveConf.ConfVars.TEZ_MIN_PARTITION_FACTOR); - long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER); + long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTES_PER_REDUCER); int defaultTinyBufferSize = context.conf.getIntVar(HiveConf.ConfVars.TEZ_SIMPLE_CUSTOM_EDGE_TINY_BUFFER_SIZE_MB); ReduceWork reduceWork = new ReduceWork(Utilities.REDUCENAME + context.nextSequenceNumber()); @@ -126,7 +125,7 @@ public static ReduceWork createReduceWork( if (isAutoReduceParallelism && reduceSink.getConf().getReducerTraits().contains(AUTOPARALLEL)) { // configured limit for reducers - final int maxReducers = context.conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS); + final int maxReducers = context.conf.getIntVar(HiveConf.ConfVars.MAX_REDUCERS); // estimated number of reducers final int nReducers = reduceSink.getConf().getNumReducers(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java index 736e562c1afb..e112b55031a4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/GenTezWork.java @@ -372,7 +372,7 @@ public Object process(Node nd, Stack stack, if (context.leafOperatorToFollowingWork.containsKey(operator)) { BaseWork followingWork = context.leafOperatorToFollowingWork.get(operator); - long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER); + long bytesPerReducer = context.conf.getLongVar(HiveConf.ConfVars.BYTES_PER_REDUCER); LOG.debug("Second pass. 
Leaf operator: "+operator +" has common downstream work: "+followingWork); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java index 3a18b7af729b..271b384aba4e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java @@ -351,7 +351,7 @@ private void analyzeLoad(ASTNode ast) throws SemanticException { // for managed tables, make sure the file formats match if (TableType.MANAGED_TABLE.equals(ts.tableHandle.getTableType()) - && conf.getBoolVar(HiveConf.ConfVars.HIVECHECKFILEFORMAT)) { + && conf.getBoolVar(HiveConf.ConfVars.HIVE_CHECK_FILEFORMAT)) { ensureFileFormatsMatch(ts, files, fromURI); } inputs.add(toReadEntity(new Path(fromURI))); @@ -426,7 +426,7 @@ private void analyzeLoad(ASTNode ast) throws SemanticException { // However, some other stats, like number of files, do not require a complete scan // Update the stats which do not require a complete scan. Task statTask = null; - if (conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER)) { BasicStatsWork basicStatsWork = new BasicStatsWork(loadTableWork); basicStatsWork.setNoStatsAggregator(true); basicStatsWork.setClearAggregatorStats(true); @@ -557,7 +557,7 @@ private void reparseAndSuperAnalyze(Table table, URI fromURI) throws SemanticExc // Step 3 : parse the query // Set dynamic partitioning to nonstrict so that queries do not need any partition // references. - HiveConf.setVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + HiveConf.setVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); // Parse the rewritten query string Context rewrittenCtx; rewrittenCtx = new Context(conf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java index 2a3b60e52bb6..376ba8708ca1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/MapReduceCompiler.java @@ -185,7 +185,7 @@ private static int getNumberOfReducers(MapredWork mrwork, HiveConf conf) { return mrwork.getReduceWork().getNumReduceTasks(); } - return conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS); + return conf.getIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS); } @Override @@ -200,7 +200,7 @@ protected void decideExecMode(List> rootTasks, Context ctx, // user has told us to run in local mode or doesn't want auto-local mode if (ctx.isLocalOnlyExecutionMode() || - !conf.getBoolVar(HiveConf.ConfVars.LOCALMODEAUTO)) { + !conf.getBoolVar(HiveConf.ConfVars.LOCAL_MODE_AUTO)) { return; } @@ -230,11 +230,11 @@ public boolean accept(Path file) { // estimated Input = (num_limit * max_size_per_row) * (estimated_map + 2) // long sizePerRow = HiveConf.getLongVar(conf, - HiveConf.ConfVars.HIVELIMITMAXROWSIZE); + HiveConf.ConfVars.HIVE_LIMIT_MAX_ROW_SIZE); estimatedInput = (globalLimitCtx.getGlobalOffset() + globalLimitCtx.getGlobalLimit()) * sizePerRow; long minSplitSize = HiveConf.getLongVar(conf, - HiveConf.ConfVars.MAPREDMINSPLITSIZE); + HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE); long estimatedNumMap = inputSummary.getLength() / minSplitSize + 1; estimatedInput = estimatedInput * (estimatedNumMap + 1); } else { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java index a3a60f3d5c41..3feb18b9202a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java @@ -27,6 +27,7 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Queue; @@ -60,13 +61,20 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.getTypeStringFromAST; +import static org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.unescapeIdentifier; + /** * Library of utility functions used in the parse code. @@ -204,7 +212,7 @@ static int checkJoinFilterRefersOneAlias(String[] tabAliases, ASTNode filterCond switch(filterCondn.getType()) { case HiveParser.TOK_TABLE_OR_COL: - String tableOrCol = SemanticAnalyzer.unescapeIdentifier(filterCondn.getChild(0).getText() + String tableOrCol = unescapeIdentifier(filterCondn.getChild(0).getText() .toLowerCase()); return getIndex(tabAliases, tableOrCol); case HiveParser.Identifier: @@ -533,7 +541,8 @@ public static CBOPlan parseQuery(HiveConf conf, String viewQuery) final ASTNode ast = parse(viewQuery, ctx); final CalcitePlanner analyzer = getAnalyzer(conf, ctx); RelNode logicalPlan = analyzer.genLogicalPlan(ast); - return new CBOPlan(ast, logicalPlan, analyzer.getInvalidAutomaticRewritingMaterializationReason()); + return new CBOPlan( + ast, logicalPlan, analyzer.getMaterializationValidationResult().getSupportedRewriteAlgorithms()); } public static List parseQueryAndGetSchema(HiveConf conf, String viewQuery) @@ -563,7 +572,7 @@ private static CalcitePlanner getAnalyzer(HiveConf conf, Context ctx) throws Sem */ public static Map> getFullPartitionSpecs( CommonTree ast, Table table, Configuration conf, boolean canGroupExprs) throws SemanticException { - String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULT_PARTITION_NAME); Map colTypes = new HashMap<>(); List partitionKeys = table.getStorageHandler() != null && table.getStorageHandler().alwaysUnpartitioned() ? table.getStorageHandler().getPartitionKeys(table) : table.getPartitionKeys(); @@ -689,7 +698,7 @@ public static ReparseResult parseRewrittenQuery(Context ctx, // Set dynamic partitioning to nonstrict so that queries do not need any partition // references. // TODO: this may be a perf issue as it prevents the optimizer.. 
or not - HiveConf.setVar(ctx.getConf(), HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + HiveConf.setVar(ctx.getConf(), HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); // Disable LLAP IO wrapper; doesn't propagate extra ACID columns correctly. HiveConf.setBoolVar(ctx.getConf(), HiveConf.ConfVars.LLAP_IO_ROW_WRAPPER_ENABLED, false); // Parse the rewritten query string @@ -725,4 +734,45 @@ public static final class ReparseResult { } } + public static TypeInfo getComplexTypeTypeInfo(ASTNode typeNode) throws SemanticException { + switch (typeNode.getType()) { + case HiveParser.TOK_LIST: + ListTypeInfo listTypeInfo = new ListTypeInfo(); + listTypeInfo.setListElementTypeInfo(getComplexTypeTypeInfo((ASTNode) typeNode.getChild(0))); + return listTypeInfo; + case HiveParser.TOK_MAP: + MapTypeInfo mapTypeInfo = new MapTypeInfo(); + String keyTypeString = getTypeStringFromAST((ASTNode) typeNode.getChild(0)); + mapTypeInfo.setMapKeyTypeInfo(TypeInfoFactory.getPrimitiveTypeInfo(keyTypeString)); + mapTypeInfo.setMapValueTypeInfo(getComplexTypeTypeInfo((ASTNode) typeNode.getChild(1))); + return mapTypeInfo; + case HiveParser.TOK_STRUCT: + StructTypeInfo structTypeInfo = new StructTypeInfo(); + Map fields = collectStructFieldNames(typeNode); + structTypeInfo.setAllStructFieldNames(new ArrayList<>(fields.keySet())); + structTypeInfo.setAllStructFieldTypeInfos(new ArrayList<>(fields.values())); + return structTypeInfo; + default: + String typeString = getTypeStringFromAST(typeNode); + return TypeInfoFactory.getPrimitiveTypeInfo(typeString); + } + } + + private static Map collectStructFieldNames(ASTNode structTypeNode) throws SemanticException { + ASTNode fieldListNode = (ASTNode) structTypeNode.getChild(0); + assert fieldListNode.getType() == HiveParser.TOK_TABCOLLIST; + + Map result = new LinkedHashMap<>(fieldListNode.getChildCount()); + for (int i = 0; i < fieldListNode.getChildCount(); i++) { + ASTNode child = (ASTNode) fieldListNode.getChild(i); + + String attributeIdentifier = unescapeIdentifier(child.getChild(0).getText()); + if (result.containsKey(attributeIdentifier)) { + throw new SemanticException(ErrorMsg.AMBIGUOUS_STRUCT_ATTRIBUTE, attributeIdentifier); + } else { + result.put(attributeIdentifier, getComplexTypeTypeInfo((ASTNode) child.getChild(1))); + } + } + return result; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java index f4514d700c0f..62bd84f9f912 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ReplicationSemanticAnalyzer.java @@ -56,7 +56,7 @@ import java.util.Collections; import java.util.Objects; -import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVEQUERYID; +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_QUERY_ID; import static org.apache.hadoop.hive.ql.exec.repl.ReplAck.LOAD_ACKNOWLEDGEMENT; import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_DBNAME; import static org.apache.hadoop.hive.ql.parse.HiveParser.TOK_REPL_CONFIG; @@ -450,7 +450,7 @@ private void setConfigs(ASTNode node) throws SemanticException { for (Map.Entry config : replConfigs.entrySet()) { String key = config.getKey(); // don't set the query id in the config - if (key.equalsIgnoreCase(HIVEQUERYID.varname)) { + if (key.equalsIgnoreCase(HIVE_QUERY_ID.varname)) { String queryTag = config.getValue(); if (!StringUtils.isEmpty(queryTag)) { 
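Circling back to the getComplexTypeTypeInfo helper added to ParseUtils a few hunks above: one way to sanity-check the TypeInfo it assembles for a nested TOK_MAP(string, TOK_LIST(int)) node is to compare it against the TypeInfo parsed from the equivalent type string. This sketch is not part of the patch and assumes ListTypeInfo and MapTypeInfo equality is structural.

```java
import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

// Builds by hand the TypeInfo the new helper would produce for map<string,array<int>>,
// then compares it with the TypeInfo parsed from the canonical type string.
public class ComplexTypeInfoSketch {
  public static void main(String[] args) {
    ListTypeInfo valueType = new ListTypeInfo();
    valueType.setListElementTypeInfo(TypeInfoFactory.getPrimitiveTypeInfo("int"));

    MapTypeInfo built = new MapTypeInfo();
    built.setMapKeyTypeInfo(TypeInfoFactory.getPrimitiveTypeInfo("string"));
    built.setMapValueTypeInfo(valueType);

    TypeInfo parsed = TypeInfoUtils.getTypeInfoFromTypeString("map<string,array<int>>");
    System.out.println(built.equals(parsed)); // expected: true
  }
}
```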
QueryState.setApplicationTag(conf, queryTag); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 8cdba7fd8dfe..227baef5ce48 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -21,11 +21,10 @@ import static java.util.Objects.nonNull; import static org.apache.commons.lang3.StringUtils.isNotBlank; import static org.apache.hadoop.hive.common.AcidConstants.SOFT_DELETE_TABLE; -import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.DYNAMICPARTITIONCONVERT; -import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVEARCHIVEENABLED; +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.DYNAMIC_PARTITION_CONVERT; +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_ARCHIVE_ENABLED; import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_DEFAULT_STORAGE_HANDLER; -import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS; -import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.DEFAULT_TABLE_LOCATION; +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_STATS_DBCLASS; import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_LOCATION; import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE; import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.TABLE_IS_CTAS; @@ -182,6 +181,7 @@ import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; import org.apache.hadoop.hive.ql.lockmgr.LockException; +import org.apache.hadoop.hive.ql.metadata.MaterializationValidationResult; import org.apache.hadoop.hive.ql.metadata.DefaultConstraint; import org.apache.hadoop.hive.ql.metadata.DummyPartition; import org.apache.hadoop.hive.ql.metadata.Hive; @@ -226,6 +226,7 @@ import org.apache.hadoop.hive.ql.parse.type.TypeCheckCtx; import org.apache.hadoop.hive.ql.parse.type.TypeCheckProcFactory; import org.apache.hadoop.hive.ql.plan.AggregationDesc; +import org.apache.hadoop.hive.ql.plan.ColStatistics; import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeColumnListDesc; @@ -256,6 +257,7 @@ import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; import org.apache.hadoop.hive.ql.plan.ScriptDesc; import org.apache.hadoop.hive.ql.plan.SelectDesc; +import org.apache.hadoop.hive.ql.plan.Statistics; import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.ql.plan.UDTFDesc; @@ -457,7 +459,7 @@ public class SemanticAnalyzer extends BaseSemanticAnalyzer { HiveParser.TOK_DISTRIBUTEBY, HiveParser.TOK_SORTBY); private String invalidResultCacheReason; - private String invalidAutomaticRewritingMaterializationReason; + private MaterializationValidationResult materializationValidationResult; private final NullOrdering defaultNullOrder; @@ -1137,7 +1139,7 @@ private String processTable(QB qb, ASTNode tabref) throws SemanticException { ASTNode tableTree = (ASTNode) (tabref.getChild(0)); - String tabIdName = getUnescapedName(tableTree).toLowerCase(); + String tabIdName = HiveUtils.getLowerCaseTableName(getUnescapedName(tableTree)); String alias = findSimpleTableName(tabref, aliasIndex); @@ -1207,7 +1209,7 @@ private String processTable(QB 
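Much of the churn in this and the following files (SemanticAnalyzer, StorageFormat, TaskCompiler, TezCompiler) is the HiveConf.ConfVars rename: enum constants gain underscores (HIVEQUERYID becomes HIVE_QUERY_ID, DYNAMICPARTITIONINGMODE becomes DYNAMIC_PARTITIONING_MODE, and so on) while, as far as this diff shows, the property keys they wrap are untouched. A minimal call-site sketch under that assumption:

    import org.apache.hadoop.hive.conf.HiveConf;

    public class ConfVarsRenameSketch {
      public static void main(String[] args) {
        HiveConf conf = new HiveConf();
        // Pre-patch call sites used run-together constants such as HiveConf.ConfVars.HIVEQUERYID;
        // post-patch they use the underscored names. The wrapped property key (here "hive.query.id")
        // and any hive-site.xml or SET overrides stay the same.
        String queryId = conf.getVar(HiveConf.ConfVars.HIVE_QUERY_ID);
        HiveConf.setVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict");
        System.out.println(queryId);
      }
    }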
qb, ASTNode tabref) throws SemanticException { throw new SemanticException(generateErrorMessage((ASTNode) numerator, "Sampling percentage should be between 0 and 100")); } - int seedNum = conf.getIntVar(ConfVars.HIVESAMPLERANDOMNUM); + int seedNum = conf.getIntVar(ConfVars.HIVE_SAMPLE_RANDOM_NUM); sample = new SplitSample(percent, seedNum); } else if (type.getType() == HiveParser.TOK_ROWCOUNT) { sample = new SplitSample(Integer.parseInt(value)); @@ -1223,7 +1225,7 @@ private String processTable(QB qb, ASTNode tabref) throws SemanticException { } else if (last == 'g' || last == 'G') { length <<= 30; } - int seedNum = conf.getIntVar(ConfVars.HIVESAMPLERANDOMNUM); + int seedNum = conf.getIntVar(ConfVars.HIVE_SAMPLE_RANDOM_NUM); sample = new SplitSample(length, seedNum); } String alias_id = getAliasId(alias, qb); @@ -1271,8 +1273,8 @@ Map getNameToSplitSampleMap() { private void assertCombineInputFormat(Tree numerator, String message) throws SemanticException { String inputFormat = conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez") ? - HiveConf.getVar(conf, HiveConf.ConfVars.HIVETEZINPUTFORMAT): - HiveConf.getVar(conf, HiveConf.ConfVars.HIVEINPUTFORMAT); + HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_TEZ_INPUT_FORMAT): + HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_INPUT_FORMAT); if (!inputFormat.equals(CombineHiveInputFormat.class.getName())) { throw new SemanticException(generateErrorMessage((ASTNode) numerator, message + " sampling is not supported in " + inputFormat)); @@ -1598,11 +1600,31 @@ Table materializeCTE(String cteName, CTEClause cte) throws HiveException { LOG.info("{} will be materialized into {}", cteName, location); cte.source = analyzer; - ctx.addMaterializedTable(cteName, table); + ctx.addMaterializedTable(cteName, table, getMaterializedTableStats(analyzer.getSinkOp(), table)); return table; } + static Statistics getMaterializedTableStats(Operator sinkOp, Table table) { + final Statistics tableStats = sinkOp.getStatistics().clone(); + final List sourceColStatsList = tableStats.getColumnStats(); + final List colNames = table.getCols().stream().map(FieldSchema::getName).collect(Collectors.toList()); + if (sourceColStatsList.size() != colNames.size()) { + throw new IllegalStateException(String.format( + "The size of col stats must be equal to that of schema. 
Stats = %s, Schema = %s", + sourceColStatsList, colNames)); + } + final List colStatsList = new ArrayList<>(sourceColStatsList.size()); + for (int i = 0; i < sourceColStatsList.size(); i++) { + final ColStatistics colStats = sourceColStatsList.get(i); + // FileSinkOperator stores column stats with internal names such as "_col1" + colStats.setColumnName(colNames.get(i)); + colStatsList.add(colStats); + } + tableStats.setColumnStats(colStatsList); + return tableStats; + } + static boolean isJoinToken(ASTNode node) { return (node.getToken().getType() == HiveParser.TOK_JOIN) @@ -1985,8 +2007,8 @@ boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1, PlannerContext plannerCtx, qb.getParseInfo().setIsAnalyzeCommand(true); qb.getParseInfo().setNoScanAnalyzeCommand(this.noscan); // Allow analyze the whole table and dynamic partitions - HiveConf.setVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); - HiveConf.setVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); + HiveConf.setVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); + HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); break; @@ -2541,7 +2563,7 @@ private void getMetaData(QB qb, ReadEntity parentInput) } } } - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_AUTOGATHER)) { // Add the table spec for the destination table. qb.getParseInfo().addTableSpec(ts.getTableName().getTable().toLowerCase(), ts); } @@ -2603,7 +2625,7 @@ private void getMetaData(QB qb, ReadEntity parentInput) throw new SemanticException( generateErrorMessage(ast, "Error creating temporary folder on: " + location.toString()), e); } - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESTATSAUTOGATHER)) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_STATS_AUTOGATHER)) { TableSpec ts = new TableSpec(db, conf, this.ast); // Add the table spec for the destination table. 
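getMaterializedTableStats above exists because the statistics captured at the CTE's FileSinkOperator key column stats by internal names such as "_col1", while later lookups against the materialized table go by schema names. A condensed restatement of the remapping with the rationale spelled out (sinkOp and table as in the method above; the column name "customer_id" is a made-up example):

    // Clone so the materialized table's stats can diverge from the sink operator's.
    Statistics stats = sinkOp.getStatistics().clone();
    List<ColStatistics> remapped = new ArrayList<>(stats.getColumnStats().size());
    for (int i = 0; i < stats.getColumnStats().size(); i++) {
      ColStatistics cs = stats.getColumnStats().get(i);      // named "_col<i>" by the FileSinkOperator
      cs.setColumnName(table.getCols().get(i).getName());    // e.g. "customer_id" in the table schema
      remapped.add(cs);
    }
    stats.setColumnStats(remapped);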
qb.getParseInfo().addTableSpec(ts.getTableName().getTable().toLowerCase(), ts); @@ -4299,7 +4321,7 @@ private Operator genScriptPlan(ASTNode trfm, QB qb, Operator input) TableDesc outInfo; TableDesc errInfo; TableDesc inInfo; - String defaultSerdeName = conf.getVar(HiveConf.ConfVars.HIVESCRIPTSERDE); + String defaultSerdeName = conf.getVar(HiveConf.ConfVars.HIVE_SCRIPT_SERDE); Class serde; try { @@ -4310,7 +4332,7 @@ private Operator genScriptPlan(ASTNode trfm, QB qb, Operator input) } int fieldSeparator = Utilities.tabCode; - if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVESCRIPTESCAPE)) { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_SCRIPT_ESCAPE)) { fieldSeparator = Utilities.ctrlaCode; } @@ -4374,7 +4396,7 @@ private Class getRecordReader(ASTNode node) String name; if (node.getChildCount() == 0) { - name = conf.getVar(HiveConf.ConfVars.HIVESCRIPTRECORDREADER); + name = conf.getVar(HiveConf.ConfVars.HIVE_SCRIPT_RECORD_READER); } else { name = unescapeSQLString(node.getChild(0).getText()); } @@ -4391,7 +4413,7 @@ private Class getDefaultRecordReader() throws SemanticException { String name; - name = conf.getVar(HiveConf.ConfVars.HIVESCRIPTRECORDREADER); + name = conf.getVar(HiveConf.ConfVars.HIVE_SCRIPT_RECORD_READER); try { return (Class) Class.forName(name, true, @@ -4406,7 +4428,7 @@ private Class getRecordWriter(ASTNode node) String name; if (node.getChildCount() == 0) { - name = conf.getVar(HiveConf.ConfVars.HIVESCRIPTRECORDWRITER); + name = conf.getVar(HiveConf.ConfVars.HIVE_SCRIPT_RECORD_WRITER); } else { name = unescapeSQLString(node.getChild(0).getText()); } @@ -5378,13 +5400,13 @@ private Operator genGroupByPlanGroupByOperator(QBParseInfo parseInfo, genericUDAFEvaluators.put(entry.getKey(), genericUDAFEvaluator); } } - float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); float memoryThreshold = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); float minReductionHashAggr = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION); float minReductionHashAggrLowerBound = HiveConf - .getFloatVar(conf, ConfVars.HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND); + .getFloatVar(conf, ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND); Operator op = putOpInsertMap(OperatorFactory.getAndMakeChild( new GroupByDesc(mode, outputColumnNames, groupByKeys, aggregations, @@ -5647,13 +5669,13 @@ private Operator genGroupByPlanGroupByOperator1(QBParseInfo parseInfo, groupByOutputRowResolver.putExpression(value, new ColumnInfo( field, udaf.returnType, "", false)); } - float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); float memoryThreshold = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); float minReductionHashAggr = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION); float minReductionHashAggrLowerBound = HiveConf - .getFloatVar(conf, ConfVars.HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND); + .getFloatVar(conf, 
ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND); // Nothing special needs to be done for grouping sets if // this is the final group by operator, and multiple rows corresponding to the @@ -5828,13 +5850,13 @@ private Operator genGroupByPlanMapGroupByOperator(QB qb, genericUDAFEvaluators.put(entry.getKey(), genericUDAFEvaluator); } } - float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); float memoryThreshold = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); float minReductionHashAggr = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION); float minReductionHashAggrLowerBound = HiveConf - .getFloatVar(conf, ConfVars.HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND); + .getFloatVar(conf, ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND); Operator op = putOpInsertMap(OperatorFactory.getAndMakeChild( new GroupByDesc(GroupByDesc.Mode.HASH, outputColumnNames, groupByKeys, aggregations, false, groupByMemoryUsage, memoryThreshold, minReductionHashAggr, minReductionHashAggrLowerBound, @@ -6365,13 +6387,13 @@ private Operator genGroupByPlanGroupByOperator2MR(QBParseInfo parseInfo, groupByOutputRowResolver2.putExpression(value, new ColumnInfo( field, udaf.returnType, "", false)); } - float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); float memoryThreshold = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); float minReductionHashAggr = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION); float minReductionHashAggrLowerBound = HiveConf - .getFloatVar(conf, ConfVars.HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND); + .getFloatVar(conf, ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND); Operator op = putOpInsertMap(OperatorFactory.getAndMakeChild( new GroupByDesc(GroupByDesc.Mode.FINAL, outputColumnNames, groupByKeys, aggregations, @@ -7109,9 +7131,9 @@ private Operator genBucketingSortingDest(String dest, Operator input, QB qb, if (enforceBucketing) { Operation acidOp = AcidUtils.isFullAcidTable(dest_tab) ? 
getAcidType(table_desc.getOutputFileFormatClass(), dest, AcidUtils.isInsertOnlyTable(dest_tab)) : Operation.NOT_ACID; - int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS); - if (conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS) > 0) { - maxReducers = conf.getIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS); + int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAX_REDUCERS); + if (conf.getIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS) > 0) { + maxReducers = conf.getIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS); } int numBuckets = dest_tab.getNumBuckets(); if (numBuckets > maxReducers) { @@ -7857,8 +7879,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) cols = ct.cols; colTypes = ct.colTypes; dpCtx = new DynamicPartitionCtx(partitionColumnNames, - conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), - conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE)); + conf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME), + conf.getIntVar(HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS_PER_NODE)); qbm.setDPCtx(dest, dpCtx); isPartitioned = true; } else { @@ -7950,7 +7972,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) } } else { if (tblDesc.isCTAS() && tblDesc.getStorageHandler() != null) { - tblDesc.setLocation(getCtasOrCMVLocation(tblDesc, viewDesc, createTableUseSuffix).toString()); + tblDesc.toTable(conf).getStorageHandler().setTableLocationForCTAS( + tblDesc, getCtasOrCMVLocation(tblDesc, viewDesc, false).toString()); } tableDescriptor = PlanUtils.getTableDesc(tblDesc, cols, colTypes); } @@ -8052,8 +8075,9 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) loadFileDesc.setMoveTaskId(moveTaskId); loadFileWork.add(loadFileDesc); try { + FileSystem fs = isDfsDir ? destinationPath.getFileSystem(conf) : FileSystem.getLocal(conf); Path qualifiedPath = conf.getBoolVar(ConfVars.HIVE_RANGER_USE_FULLY_QUALIFIED_URL) ? 
- destinationPath.getFileSystem(conf).makeQualified(destinationPath) : destinationPath; + fs.makeQualified(destinationPath) : destinationPath; if (!outputs.add(new WriteEntity(qualifiedPath, !isDfsDir, isDestTempFile))) { throw new SemanticException(ErrorMsg.OUTPUT_SPECIFIED_MULTIPLE_TIMES .getMsg(destinationPath.toUri().toString())); @@ -8175,8 +8199,8 @@ protected Operator genFileSinkPlan(String dest, QB qb, Operator input) // the following code is used to collect column stats when // hive.stats.autogather=true // and it is an insert overwrite or insert into table - if (conf.getBoolVar(ConfVars.HIVESTATSAUTOGATHER) - && conf.getBoolVar(ConfVars.HIVESTATSCOLAUTOGATHER) + if (conf.getBoolVar(ConfVars.HIVE_STATS_AUTOGATHER) + && conf.getBoolVar(ConfVars.HIVE_STATS_COL_AUTOGATHER) && enableColumnStatsCollecting() && destinationTable != null && (!destinationTable.isNonNative() || destinationTable.getStorageHandler().commitInMoveTask()) @@ -8471,7 +8495,7 @@ private FileSinkDesc createFileSinkDesc(String dest, TableDesc table_desc, throw new IllegalStateException("Unexpected dest_type=" + dest_tab); } FileSinkDesc fileSinkDesc = new FileSinkDesc(queryTmpdir, table_desc, - conf.getBoolVar(HiveConf.ConfVars.COMPRESSRESULT), currentTableId, rsCtx.isMultiFileSpray(), + conf.getBoolVar(HiveConf.ConfVars.COMPRESS_RESULT), currentTableId, rsCtx.isMultiFileSpray(), canBeMerged, rsCtx.getNumFiles(), rsCtx.getTotalFiles(), rsCtx.getPartnCols(), dpCtx, dest_path, mmWriteId, isMmCtas, isInsertOverwrite, qb.getIsQuery(), qb.isCTAS() || qb.isMaterializedView(), isDirectInsert, acidOperation, @@ -8516,7 +8540,7 @@ private FileSinkDesc createFileSinkDesc(String dest, TableDesc table_desc, // it should be the same as the MoveWork's sourceDir. fileSinkDesc.setStatsAggPrefix(fileSinkDesc.getDirName().toString()); if (!destTableIsMaterialization && - HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) { + HiveConf.getVar(conf, HIVE_STATS_DBCLASS).equalsIgnoreCase(StatDB.fs.name())) { String statsTmpLoc = ctx.getTempDirForInterimJobPath(dest_path).toString(); fileSinkDesc.setStatsTmpDir(statsTmpLoc); LOG.debug("Set stats collection dir : " + statsTmpLoc); @@ -8641,7 +8665,13 @@ private WriteEntity generateTableWriteEntity(String dest, Table dest_tab, new DummyPartition(dest_tab, dest_tab.getDbName() + "@" + dest_tab.getTableName() + "@" + ppath, partSpec); - output = new WriteEntity(p, getWriteType(dest), false); + WriteEntity.WriteType writeType; + if (ltd.isInsertOverwrite()) { + writeType = WriteEntity.WriteType.INSERT_OVERWRITE; + } else { + writeType = getWriteType(dest); + } + output = new WriteEntity(p, writeType, false); output.setDynamicPartitionWrite(true); outputs.add(output); } @@ -8699,8 +8729,8 @@ private DynamicPartitionCtx checkDynPart(QB qb, QBMetaData qbm, Table dest_tab, if (dpCtx == null) { dest_tab.validatePartColumnNames(partSpec, false); dpCtx = new DynamicPartitionCtx(partSpec, - conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME), - conf.getIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTSPERNODE)); + conf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME), + conf.getIntVar(HiveConf.ConfVars.DYNAMIC_PARTITION_MAX_PARTS_PER_NODE)); qbm.setDPCtx(dest, dpCtx); } @@ -8713,7 +8743,7 @@ private DynamicPartitionCtx checkDynPart(QB qb, QBMetaData qbm, Table dest_tab, } private static void verifyDynamicPartitionEnabled(HiveConf conf, QB qb, String dest) throws SemanticException { - if (!HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING)) { // allow DP + if 
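One non-mechanical change earlier in this chunk is the qualified-path computation for write entities. A restatement with the apparent intent spelled out (isDfsDir, destinationPath and conf as in genFileSinkPlan; the rationale is an inference, not stated in the patch):

    // Local destinations are now qualified against the local FileSystem; previously the path was
    // always resolved through destinationPath's filesystem, which for a scheme-less local dir is
    // presumably the default (distributed) one.
    FileSystem fs = isDfsDir ? destinationPath.getFileSystem(conf) : FileSystem.getLocal(conf);
    Path qualifiedPath = conf.getBoolVar(ConfVars.HIVE_RANGER_USE_FULLY_QUALIFIED_URL)
        ? fs.makeQualified(destinationPath)
        : destinationPath;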
(!HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING)) { // allow DP throw new SemanticException(generateErrorMessage(qb.getParseInfo().getDestForClause(dest), ErrorMsg.DYNAMIC_PARTITION_DISABLED.getMsg())); } @@ -8750,8 +8780,8 @@ String fixCtasColumnName(String colName) { private void checkAcidConstraints() { /* LOG.info("Modifying config values for ACID write"); - conf.setBoolVar(ConfVars.HIVEOPTREDUCEDEDUPLICATION, true); - conf.setIntVar(ConfVars.HIVEOPTREDUCEDEDUPLICATIONMINREDUCER, 1); + conf.setBoolVar(ConfVars.HIVE_OPT_REDUCE_DEDUPLICATION, true); + conf.setIntVar(ConfVars.HIVE_OPT_REDUCE_DEDUPLICATION_MIN_REDUCER, 1); These props are now enabled elsewhere (see commit diffs). It would be better instead to throw if they are not set. For exmaple, if user has set hive.optimize.reducededuplication=false for some reason, we'll run a query contrary to what they wanted... But throwing now would be @@ -8865,7 +8895,7 @@ private Operator genConversionSelectOperator(String dest, QB qb, Operator input, // Check column number List tableFields = oi.getAllStructFieldRefs(); - boolean dynPart = HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING); + boolean dynPart = HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING); List rowFields = opParseCtx.get(input).getRowResolver().getColumnInfos(); int inColumnCnt = rowFields.size(); int outColumnCnt = tableFields.size(); @@ -8936,7 +8966,7 @@ private Operator genConversionSelectOperator(String dest, QB qb, Operator input, new ExprNodeColumnDesc(inputTypeInfo, inputColumn.getInternalName(), "", true); // Cast input column to destination column type if necessary. - if (conf.getBoolVar(DYNAMICPARTITIONCONVERT)) { + if (conf.getBoolVar(DYNAMIC_PARTITION_CONVERT)) { if (parts != null && !parts.isEmpty()) { String destPartitionName = dpCtx.getDPColNames().get(dpColIdx); FieldSchema destPartitionFieldSchema = parts.stream() @@ -10095,13 +10125,13 @@ private Operator genMapGroupByForSemijoin(List fields, Operator inpu } // Generate group-by operator - float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY); + float groupByMemoryUsage = HiveConf.getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY); float memoryThreshold = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRMEMORYTHRESHOLD); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_MEMORY_THRESHOLD); float minReductionHashAggr = HiveConf - .getFloatVar(conf, HiveConf.ConfVars.HIVEMAPAGGRHASHMINREDUCTION); + .getFloatVar(conf, HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION); float minReductionHashAggrLowerBound = HiveConf - .getFloatVar(conf, ConfVars.HIVEMAPAGGRHASHMINREDUCTIONLOWERBOUND); + .getFloatVar(conf, ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_LOWER_BOUND); Operator op = putOpInsertMap(OperatorFactory.getAndMakeChild( new GroupByDesc(GroupByDesc.Mode.HASH, outputColumnNames, groupByKeys, aggregations, false, groupByMemoryUsage, memoryThreshold, minReductionHashAggr, minReductionHashAggrLowerBound, @@ -10205,7 +10235,7 @@ private List getMapSideJoinTables(QB qb) { ASTNode hint = (ASTNode) hints.getChild(pos); if (((ASTNode) hint.getChild(0)).getToken().getType() == HintParser.TOK_MAPJOIN) { // the user has specified to ignore mapjoin hint - if (!conf.getBoolVar(HiveConf.ConfVars.HIVEIGNOREMAPJOINHINT) + if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_IGNORE_MAPJOIN_HINT) && !conf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_ENGINE).equals("tez")) { ASTNode hintTblNames = (ASTNode) hint.getChild(1); int 
numCh = hintTblNames.getChildCount(); @@ -11357,7 +11387,7 @@ private Operator genBodyPlan(QB qb, Operator input, Map aliasT // If we can put multiple group bys in a single reducer, determine suitable groups of // expressions, otherwise treat all the expressions as a single group - if (conf.getBoolVar(HiveConf.ConfVars.HIVEMULTIGROUPBYSINGLEREDUCER)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_MULTI_GROUPBY_SINGLE_REDUCER)) { try { commonGroupByDestGroups = getCommonGroupByDestGroups(qb, inputs); } catch (SemanticException e) { @@ -11389,8 +11419,8 @@ private Operator genBodyPlan(QB qb, Operator input, Map aliasT if (commonGroupByDestGroup.size() == 1 || (qbp.getAggregationExprsForClause(firstDest).size() == 0 && getGroupByForClause(qbp, firstDest).size() == 0) || - conf.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW) || - !conf.getBoolVar(HiveConf.ConfVars.HIVEMULTIGROUPBYSINGLEREDUCER)) { + conf.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW) || + !conf.getBoolVar(HiveConf.ConfVars.HIVE_MULTI_GROUPBY_SINGLE_REDUCER)) { // Go over all the destination tables for (String dest : commonGroupByDestGroup) { @@ -11408,7 +11438,7 @@ private Operator genBodyPlan(QB qb, Operator input, Map aliasT && (qbp.getSelForClause(dest).getToken().getType() != HiveParser.TOK_SELECTDI || qbp.getWindowingExprsForClause(dest) == null)) { // multiple distincts is not supported with skew in data - if (conf.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW) && + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW) && qbp.getDistinctFuncExprsForClause(dest).size() > 1) { throw new SemanticException(ErrorMsg.UNSUPPORTED_MULTIPLE_DISTINCTS. getMsg()); @@ -11433,13 +11463,13 @@ private Operator genBodyPlan(QB qb, Operator input, Map aliasT qbp.setSelExprForClause(dest, genSelectDIAST(rr)); } } - if (conf.getBoolVar(HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE)) { - if (!conf.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_MAPSIDE_AGGREGATE)) { + if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { curr = genGroupByPlanMapAggrNoSkew(dest, qb, curr); } else { curr = genGroupByPlanMapAggr2MR(dest, qb, curr); } - } else if (conf.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + } else if (conf.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { curr = genGroupByPlan2MR(dest, qb, curr); } else { curr = genGroupByPlan1MR(dest, qb, curr); @@ -11494,13 +11524,13 @@ private Operator genPostGroupByBodyPlan(Operator curr, String dest, QB qb, || getGroupByForClause(qbp, dest).size() > 0) && qbp.getSelForClause(dest).getToken().getType() == HiveParser.TOK_SELECTDI && qbp.getWindowingExprsForClause(dest) != null) { - if (conf.getBoolVar(HiveConf.ConfVars.HIVEMAPSIDEAGGREGATE)) { - if (!conf.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_MAPSIDE_AGGREGATE)) { + if (!conf.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { curr = genGroupByPlanMapAggrNoSkew(dest, qb, curr); } else { curr = genGroupByPlanMapAggr2MR(dest, qb, curr); } - } else if (conf.getBoolVar(HiveConf.ConfVars.HIVEGROUPBYSKEW)) { + } else if (conf.getBoolVar(HiveConf.ConfVars.HIVE_GROUPBY_SKEW)) { curr = genGroupByPlan2MR(dest, qb, curr); } else { curr = genGroupByPlan1MR(dest, qb, curr); @@ -12079,13 +12109,13 @@ samplePredicate, true, new SampleDesc(ts.getNumerator(), new RowSchema(rwsch.getColumnInfos()), top); } } else { - boolean testMode = conf.getBoolVar(ConfVars.HIVETESTMODE); + boolean testMode = conf.getBoolVar(ConfVars.HIVE_TEST_MODE); if (testMode) { String 
tabName = tab.getTableName(); // has the user explicitly asked not to sample this table String unSampleTblList = conf - .getVar(ConfVars.HIVETESTMODENOSAMPLE); + .getVar(ConfVars.HIVE_TEST_MODE_NOSAMPLE); String[] unSampleTbls = unSampleTblList.split(","); boolean unsample = false; for (String unSampleTbl : unSampleTbls) { @@ -12114,7 +12144,7 @@ samplePredicate, true, new SampleDesc(ts.getNumerator(), LOG.info("No need for sample filter"); } else { // The table is not bucketed, add a dummy filter :: rand() - int freq = conf.getIntVar(ConfVars.HIVETESTMODESAMPLEFREQ); + int freq = conf.getIntVar(ConfVars.HIVE_TEST_MODE_SAMPLE_FREQ); TableSample tsSample = new TableSample(1, freq); tsSample.setInputPruning(false); qb.getParseInfo().setTabSample(alias, tsSample); @@ -12135,6 +12165,11 @@ samplePredicate, true, new SampleDesc(ts.getNumerator(), Operator output = putOpInsertMap(op, rwsch); + if (tab.isMaterializedTable()) { + // Clone Statistics just in case because multiple TableScanOperator can access the same CTE + top.setStatistics(ctx.getMaterializedTableStats(tab.getFullTableName()).clone()); + } + LOG.debug("Created Table Plan for {} {}", alias, op); return output; @@ -12155,7 +12190,7 @@ private void setupStats(TableScanDesc tsDesc, QBParseInfo qbp, Table tab, String return; } - if (HiveConf.getVar(conf, HIVESTATSDBCLASS).equalsIgnoreCase(StatDB.fs.name())) { + if (HiveConf.getVar(conf, HIVE_STATS_DBCLASS).equalsIgnoreCase(StatDB.fs.name())) { String statsTmpLoc = ctx.getTempDirForInterimJobPath(tab.getPath()).toString(); LOG.debug("Set stats collection dir : " + statsTmpLoc); tsDesc.setTmpStatsDir(statsTmpLoc); @@ -13196,7 +13231,7 @@ void analyzeInternal(ASTNode ast, Supplier pcf) throws SemanticE // Add the transformation that computes the lineage information. Set postExecHooks = Sets.newHashSet(Splitter.on(",").trimResults() .omitEmptyStrings() - .split(Strings.nullToEmpty(HiveConf.getVar(conf, HiveConf.ConfVars.POSTEXECHOOKS)))); + .split(Strings.nullToEmpty(HiveConf.getVar(conf, HiveConf.ConfVars.POST_EXEC_HOOKS)))); if (postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.PostExecutePrinter") || postExecHooks.contains("org.apache.hadoop.hive.ql.hooks.LineageLogger") || postExecHooks.contains("org.apache.atlas.hive.hook.HiveHook")) { @@ -13660,7 +13695,7 @@ public void validate() throws SemanticException { LOG.debug("validated " + usedp.getName()); LOG.debug(usedp.getTable().getTableName()); - if (!AcidUtils.isTransactionalTable(tbl) && conf.getBoolVar(HIVEARCHIVEENABLED)) { + if (!AcidUtils.isTransactionalTable(tbl) && conf.getBoolVar(HIVE_ARCHIVE_ENABLED)) { // Do not check for ACID; it does not create new parts and this is expensive as hell. // TODO: add an API to get table name list for archived parts with a single call; // nobody uses this so we could skip the whole thing. 
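The materialized-CTE statistics hunks in this patch form a pair: one stores the stats when the CTE is written, the other hands them to each scan. Side by side for reference (all names taken from the patch, shown out of their original methods):

    // Producer side (materializeCTE): statistics captured at the CTE's sink are stored with the table.
    ctx.addMaterializedTable(cteName, table, getMaterializedTableStats(analyzer.getSinkOp(), table));

    // Consumer side (genTablePlan, above): each scan of the materialized CTE gets its own clone,
    // since several TableScanOperators can read the same CTE and may adjust their copy independently.
    top.setStatistics(ctx.getMaterializedTableStats(tab.getFullTableName()).clone());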
@@ -13746,7 +13781,7 @@ private Map validateAndAddDefaultProperties( boolean isTemporaryTable, boolean isTransactional, boolean isManaged, String[] qualifiedTabName, boolean isTableTypeChanged) throws SemanticException { Map retValue = Optional.ofNullable(tblProp).orElseGet(HashMap::new); - String paraString = HiveConf.getVar(conf, ConfVars.NEWTABLEDEFAULTPARA); + String paraString = HiveConf.getVar(conf, ConfVars.NEW_TABLE_DEFAULT_PARA); if (paraString != null && !paraString.isEmpty()) { for (String keyValuePair : paraString.split(",")) { String[] keyValue = keyValuePair.split("=", 2); @@ -13803,7 +13838,7 @@ private Map validateAndAddDefaultProperties( } if (isIcebergTable(retValue)) { - SessionStateUtil.addResourceOrThrow(conf, hive_metastoreConstants.DEFAULT_TABLE_LOCATION, + SessionStateUtil.addResourceOrThrow(conf, SessionStateUtil.DEFAULT_TABLE_LOCATION, getDefaultLocation(qualifiedTabName[0], qualifiedTabName[1], true)); } return retValue; @@ -14014,7 +14049,7 @@ ASTNode analyzeCreateTable( throw new SemanticException(ErrorMsg.CTAS_COLLST_COEXISTENCE.getMsg()); } if (partCols.size() != 0 || bucketCols.size() != 0) { - boolean dynPart = HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMICPARTITIONING); + boolean dynPart = HiveConf.getBoolVar(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING); if (dynPart == false) { throw new SemanticException(ErrorMsg.CTAS_PARCOL_COEXISTENCE.getMsg()); } else { @@ -14331,7 +14366,7 @@ ASTNode analyzeCreateTable( if(lStats != null && lStats.length != 0) { // Don't throw an exception if the target location only contains the staging-dirs for (FileStatus lStat : lStats) { - if (!lStat.getPath().getName().startsWith(HiveConf.getVar(conf, HiveConf.ConfVars.STAGINGDIR))) { + if (!lStat.getPath().getName().startsWith(HiveConf.getVar(conf, HiveConf.ConfVars.STAGING_DIR))) { throw new SemanticException(ErrorMsg.CTAS_LOCATION_NONEMPTY.getMsg(location)); } } @@ -14663,11 +14698,12 @@ private void validateCreateView() } throw new SemanticException(msg); } - if (!isValidAutomaticRewritingMaterialization()) { - String errorMessage = "Only query text based automatic rewriting is available for materialized view. 
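The validateCreateView hunk around this point replaces the old single-string "invalid automatic rewriting" reason with a MaterializationValidationResult: when the result reports no supported rewrite algorithm, the materialized view is still created but with automatic rewriting disabled, and any validation message is printed to the console rather than only logged. Condensed, with comments (names as in SemanticAnalyzer):

    MaterializationValidationResult result = getMaterializationValidationResult();
    if (result.getSupportedRewriteAlgorithms().isEmpty()) {
      // Keep the materialized view, but never consider it for automatic query rewriting.
      createVwDesc.setRewriteEnabled(false);
    }
    String message = result.getErrorMessage();
    if (isNotBlank(message)) {
      console.printError(message);   // surfaced to the user instead of a LOG.warn
    }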
" + - getInvalidAutomaticRewritingMaterializationReason(); + if (materializationValidationResult.getSupportedRewriteAlgorithms().isEmpty()) { + createVwDesc.setRewriteEnabled(false); + } + String errorMessage = materializationValidationResult.getErrorMessage(); + if (isNotBlank(errorMessage)) { console.printError(errorMessage); - LOG.warn(errorMessage); } } } catch (HiveException e) { @@ -15924,18 +15960,14 @@ public ColsAndTypes(String cols, String colTypes) { public String colTypes; } - public String getInvalidAutomaticRewritingMaterializationReason() { - return invalidAutomaticRewritingMaterializationReason; - } - - public void setInvalidAutomaticRewritingMaterializationReason( - String invalidAutomaticRewritingMaterializationReason) { - this.invalidAutomaticRewritingMaterializationReason = - invalidAutomaticRewritingMaterializationReason; + public MaterializationValidationResult getMaterializationValidationResult() { + return materializationValidationResult; } - public boolean isValidAutomaticRewritingMaterialization() { - return (invalidAutomaticRewritingMaterializationReason == null); + public void setMaterializationValidationResult( + MaterializationValidationResult materializationValidationResult) { + this.materializationValidationResult = + materializationValidationResult; } public String getInvalidResultCacheReason() { @@ -15973,8 +16005,7 @@ protected enum MaterializationRebuildMode { INSERT_OVERWRITE_REBUILD, AGGREGATE_INSERT_REBUILD, AGGREGATE_INSERT_DELETE_REBUILD, - JOIN_INSERT_REBUILD, - JOIN_INSERT_DELETE_REBUILD + JOIN_INSERT_REBUILD } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java index b01be0740631..50f08ff1f4ed 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/StorageFormat.java @@ -180,9 +180,9 @@ public void processStorageFormat(String name) throws SemanticException { if (serde == null) { // RCFile supports a configurable SerDe if (name.equalsIgnoreCase(IOConstants.RCFILE)) { - serde = ensureClassExists(HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTRCFILESERDE)); + serde = ensureClassExists(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DEFAULT_RCFILE_SERDE)); } else { - serde = ensureClassExists(HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTSERDE)); + serde = ensureClassExists(HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DEFAULT_SERDE)); } } } @@ -197,8 +197,8 @@ public void fillDefaultStorageFormat(boolean isExternal, boolean isMaterializedV HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_FILE_FORMAT); serde = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_MATERIALIZED_VIEW_SERDE); } else { - defaultFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTFILEFORMAT); - defaultManagedFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTMANAGEDFILEFORMAT); + defaultFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DEFAULT_FILEFORMAT); + defaultManagedFormat = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DEFAULT_MANAGED_FILEFORMAT); } if (!isExternal && !"none".equals(defaultManagedFormat)) { @@ -211,7 +211,7 @@ public void fillDefaultStorageFormat(boolean isExternal, boolean isMaterializedV } else { processStorageFormat(defaultFormat); if (defaultFormat.equalsIgnoreCase(IOConstants.RCFILE)) { - serde = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTRCFILESERDE); + serde = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DEFAULT_RCFILE_SERDE); } } } diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java index 1095b13e1a3e..062a3fb4d344 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java @@ -261,7 +261,7 @@ public void compile(final ParseContext pCtx, // For the FetchTask, the limit optimization requires we fetch all the rows // in memory and count how many rows we get. It's not practical if the // limit factor is too big - int fetchLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVELIMITOPTMAXFETCH); + int fetchLimit = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_LIMIT_OPT_MAX_FETCH); if (globalLimitCtx.isEnable() && globalLimitCtx.getGlobalLimit() > fetchLimit) { LOG.info("For FetchTask, LIMIT " + globalLimitCtx.getGlobalLimit() + " > " + fetchLimit + ". Doesn't qualify limit optimization."); @@ -764,12 +764,12 @@ protected abstract void generateTaskTree(List> rootTasks, ParseContext p protected void runDynPartitionSortOptimizations(ParseContext parseContext, HiveConf hConf) throws SemanticException { // run Sorted dynamic partition optimization - if(HiveConf.getBoolVar(hConf, HiveConf.ConfVars.DYNAMICPARTITIONING) && - HiveConf.getVar(hConf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE).equals("nonstrict") && - !HiveConf.getBoolVar(hConf, HiveConf.ConfVars.HIVEOPTLISTBUCKETING)) { + if(HiveConf.getBoolVar(hConf, HiveConf.ConfVars.DYNAMIC_PARTITIONING) && + HiveConf.getVar(hConf, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE).equals("nonstrict") && + !HiveConf.getBoolVar(hConf, HiveConf.ConfVars.HIVE_OPT_LIST_BUCKETING)) { new SortedDynPartitionOptimizer().transform(parseContext); - if(HiveConf.getBoolVar(hConf, HiveConf.ConfVars.HIVEOPTREDUCEDEDUPLICATION)) { + if(HiveConf.getBoolVar(hConf, HiveConf.ConfVars.HIVE_OPT_REDUCE_DEDUPLICATION)) { // Dynamic sort partition adds an extra RS therefore need to de-dup new ReduceSinkDeDuplication().transform(parseContext); // there is an issue with dedup logic wherein SELECT is created with wrong columns diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java index 57f1f491b315..6c17e9878893 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java @@ -195,7 +195,7 @@ protected void optimizeOperatorPlan(ParseContext pCtx) throws SemanticException } // need to run this; to get consistent filterop conditions(for operator tree matching) - if (procCtx.conf.getBoolVar(ConfVars.HIVEOPTCONSTANTPROPAGATION)) { + if (procCtx.conf.getBoolVar(ConfVars.HIVE_OPT_CONSTANT_PROPAGATION)) { new ConstantPropagate(ConstantPropagateOption.SHORTCUT).transform(procCtx.parseContext); } @@ -205,15 +205,15 @@ protected void optimizeOperatorPlan(ParseContext pCtx) throws SemanticException perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Setup stats in the operator plan"); // run Sorted dynamic partition optimization - if(HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.DYNAMICPARTITIONING) && - HiveConf.getVar(procCtx.conf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE).equals("nonstrict") && - !HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.HIVEOPTLISTBUCKETING)) { + if(HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING) && + HiveConf.getVar(procCtx.conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE).equals("nonstrict") && + 
!HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.HIVE_OPT_LIST_BUCKETING)) { perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER); new SortedDynPartitionOptimizer().transform(procCtx.parseContext); perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Sorted dynamic partition optimization"); } - if(HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.HIVEOPTREDUCEDEDUPLICATION)) { + if(HiveConf.getBoolVar(procCtx.conf, HiveConf.ConfVars.HIVE_OPT_REDUCE_DEDUPLICATION)) { perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER); // Dynamic sort partition adds an extra RS therefore need to de-dup new ReduceSinkDeDuplication().transform(procCtx.parseContext); @@ -233,7 +233,7 @@ protected void optimizeOperatorPlan(ParseContext pCtx) throws SemanticException new BucketVersionPopulator().transform(pCtx); perfLogger.perfLogBegin(this.getClass().getName(), PerfLogger.TEZ_COMPILER); - if(procCtx.conf.getBoolVar(ConfVars.HIVEOPTJOINREDUCEDEDUPLICATION)) { + if(procCtx.conf.getBoolVar(ConfVars.HIVE_OPT_JOIN_REDUCE_DEDUPLICATION)) { new ReduceSinkJoinDeDuplication().transform(procCtx.parseContext); } perfLogger.perfLogEnd(this.getClass().getName(), PerfLogger.TEZ_COMPILER, "Run reduce sink after join algorithm selection"); @@ -253,7 +253,7 @@ protected void optimizeOperatorPlan(ParseContext pCtx) throws SemanticException // of "and true and true" conditions. // Rather than run the full constant folding just need to shortcut AND/OR expressions // involving constant true/false values. - if(procCtx.conf.getBoolVar(ConfVars.HIVEOPTCONSTANTPROPAGATION)) { + if(procCtx.conf.getBoolVar(ConfVars.HIVE_OPT_CONSTANT_PROPAGATION)) { new ConstantPropagate(ConstantPropagateOption.SHORTCUT).transform(procCtx.parseContext); } @@ -481,7 +481,7 @@ private void runStatsDependentOptimizations(OptimizeTezProcContext procCtx) thro new SetReducerParallelism()); opRules.put(new RuleRegExp("Convert Join to Map-join", JoinOperator.getOperatorName() + "%"), new ConvertJoinMapJoin()); - if (procCtx.conf.getBoolVar(ConfVars.HIVEMAPAGGRHASHMINREDUCTIONSTATSADJUST)) { + if (procCtx.conf.getBoolVar(ConfVars.HIVE_MAP_AGGR_HASH_MIN_REDUCTION_STATS_ADJUST)) { opRules.put(new RuleRegExp("Set min reduction - GBy (Hash)", GroupByOperator.getOperatorName() + "%"), new SetHashGroupByMinReduction()); @@ -681,7 +681,7 @@ protected void generateTaskTree(List> rootTasks, ParseContext pCtx, for (BaseWork w : baseWorkList) { // work should be the smallest unit for memory allocation w.setReservedMemoryMB( - (int)(conf.getLongVar(ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD) / (1024 * 1024))); + (int)(conf.getLongVar(ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD) / (1024 * 1024))); } } @@ -788,13 +788,13 @@ protected void optimizeTaskPlan(List> rootTasks, ParseContext pCtx, PhysicalContext physicalCtx = new PhysicalContext(conf, pCtx, pCtx.getContext(), rootTasks, pCtx.getFetchTask()); - if (conf.getBoolVar(HiveConf.ConfVars.HIVENULLSCANOPTIMIZE)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_NULL_SCAN_OPTIMIZE)) { physicalCtx = new NullScanOptimizer().resolve(physicalCtx); } else { LOG.debug("Skipping null scan query optimization"); } - if (conf.getBoolVar(HiveConf.ConfVars.HIVEMETADATAONLYQUERIES)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_METADATA_ONLY_QUERIES)) { physicalCtx = new MetadataOnlyOptimizer().resolve(physicalCtx); } else { LOG.debug("Skipping metadata only query optimization"); @@ -818,14 +818,14 @@ protected void optimizeTaskPlan(List> 
rootTasks, ParseContext pCtx, LOG.debug("Skipping vectorization"); } - if (!"none".equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.HIVESTAGEIDREARRANGE))) { + if (!"none".equalsIgnoreCase(conf.getVar(HiveConf.ConfVars.HIVE_STAGE_ID_REARRANGE))) { physicalCtx = new StageIDsRearranger().resolve(physicalCtx); } else { LOG.debug("Skipping stage id rearranger"); } if ((conf.getBoolVar(HiveConf.ConfVars.HIVE_TEZ_ENABLE_MEMORY_MANAGER)) - && (conf.getBoolVar(HiveConf.ConfVars.HIVEUSEHYBRIDGRACEHASHJOIN))) { + && (conf.getBoolVar(HiveConf.ConfVars.HIVE_USE_HYBRIDGRACE_HASHJOIN))) { physicalCtx = new MemoryDecider().resolve(physicalCtx); } @@ -1488,7 +1488,7 @@ private void removeSemiJoinEdgesForUnion(OptimizeTezProcContext procCtx) throws */ private void removeSemijoinsParallelToMapJoin(OptimizeTezProcContext procCtx) throws SemanticException { - if (!procCtx.conf.getBoolVar(ConfVars.HIVECONVERTJOIN)) { + if (!procCtx.conf.getBoolVar(ConfVars.HIVE_CONVERT_JOIN)) { // Not needed without mapjoin conversion return; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/CopyOnWriteDeleteRewriter.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/CopyOnWriteDeleteRewriter.java index 5c4366b5ba40..c1396199c8f2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/CopyOnWriteDeleteRewriter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/CopyOnWriteDeleteRewriter.java @@ -65,7 +65,8 @@ public ParseUtils.ReparseResult rewrite(Context context, DeleteStatement deleteB sqlGenerator.append(sqlGenerator.getTargetTableFullName()); // Add the inverted where clause, since we want to hold the records which doesn't satisfy the condition. - sqlGenerator.append("\nwhere NOT (").append(whereClause).append(")"); + sqlGenerator.append("\nwhere "); + sqlGenerator.append("( NOT(%s) OR (%s) IS NULL )".replace("%s", whereClause)); sqlGenerator.append("\n"); // Add the file path filter that matches the delete condition. 
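The CopyOnWriteDeleteRewriter change just above is about SQL three-valued logic: with the old "where NOT (cond)", rows for which the DELETE predicate evaluates to NULL were excluded from the copied (kept) data and thus silently deleted; the new template keeps them. The same template shows up again in CopyOnWriteUpdateRewriter further down. A tiny sketch of the substitution (the "id = 5" predicate is a made-up example):

    String whereClause = "id = 5";
    // String.replace substitutes every occurrence of "%s", so the predicate lands in both slots:
    String inverted = "( NOT(%s) OR (%s) IS NULL )".replace("%s", whereClause);
    // -> "( NOT(id = 5) OR (id = 5) IS NULL )": rows where the predicate is unknown (NULL)
    //    now survive the copy-on-write rewrite instead of being dropped with the matching rows.
    System.out.println(inverted);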
sqlGenerator.append("AND ").append(filePathCol); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/CopyOnWriteMergeRewriter.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/CopyOnWriteMergeRewriter.java index c99324a8be72..968f34078af9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/CopyOnWriteMergeRewriter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/CopyOnWriteMergeRewriter.java @@ -127,6 +127,7 @@ private static String replaceColumnRefsWithTargetPrefix(String columnRef, String static class CopyOnWriteMergeWhenClauseSqlGenerator extends MergeRewriter.MergeWhenClauseSqlGenerator { private final COWWithClauseBuilder cowWithClauseBuilder; + private int subQueryCount = 0; CopyOnWriteMergeWhenClauseSqlGenerator( HiveConf conf, MultiInsertSqlGenerator sqlGenerator, MergeStatement mergeStatement) { @@ -138,7 +139,7 @@ static class CopyOnWriteMergeWhenClauseSqlGenerator extends MergeRewriter.MergeW public void appendWhenNotMatchedInsertClause(MergeStatement.InsertClause insertClause) { String targetAlias = mergeStatement.getTargetAlias(); - if (mergeStatement.getWhenClauses().size() > 1) { + if (++subQueryCount > 1) { sqlGenerator.append("union all\n"); } sqlGenerator.append(" -- insert clause\n").append("SELECT "); @@ -173,7 +174,8 @@ public void appendWhenMatchedUpdateClause(MergeStatement.UpdateClause updateClau UnaryOperator columnRefsFunc = value -> replaceColumnRefsWithTargetPrefix(targetAlias, value); sqlGenerator.append(" -- update clause (insert part)\n").append("SELECT "); - + ++subQueryCount; + if (isNotBlank(hintStr)) { sqlGenerator.append(hintStr); hintStr = null; @@ -206,8 +208,7 @@ protected void handleWhenMatchedDelete(String onClauseAsString, String extraPred UnaryOperator columnRefsFunc = value -> replaceColumnRefsWithTargetPrefix(targetAlias, value); List deleteValues = sqlGenerator.getDeleteValues(Context.Operation.DELETE); - List whenClauses = mergeStatement.getWhenClauses(); - if (whenClauses.size() > 1 || whenClauses.get(0) instanceof MergeStatement.UpdateClause) { + if (++subQueryCount > 1) { sqlGenerator.append("union all\n"); } sqlGenerator.append(" -- delete clause\n").append("SELECT "); @@ -227,14 +228,14 @@ protected void handleWhenMatchedDelete(String onClauseAsString, String extraPred String whereClauseStr = columnRefsFunc.apply(whereClause.toString()); String filePathCol = HiveUtils.unparseIdentifier(TARGET_PREFIX + VirtualColumn.FILE_PATH.getName(), conf); - sqlGenerator.append("\n").indent(); - sqlGenerator.append("NOT(").append(whereClauseStr.replace("=","<=>")); - if (isNotBlank(onClausePredicate)) { - sqlGenerator.append(" OR "); - sqlGenerator.append(columnRefsFunc.apply(mergeStatement.getOnClausePredicate())); + whereClause.append(" OR ").append(onClausePredicate); } - sqlGenerator.append(")\n").indent(); + sqlGenerator.append("\n").indent(); + sqlGenerator.append("( NOT(%s) OR (%s) IS NULL )".replace("%s", columnRefsFunc.apply( + whereClause.toString()))); + + sqlGenerator.append("\n").indent(); // Add the file path filter that matches the delete condition. 
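In CopyOnWriteMergeRewriter the new subQueryCount field replaces inspection of the WHEN-clause list: every emitted branch (insert, the update's insert part, delete) bumps the counter, and "union all" is written before every branch except the first, regardless of clause order. The pattern reduced to a self-contained sketch (StringBuilder and the branch strings stand in for the patch's MultiInsertSqlGenerator):

    import java.util.Arrays;
    import java.util.List;

    public class UnionAllSeparatorSketch {
      public static void main(String[] args) {
        List<String> branches = Arrays.asList("SELECT ... -- insert clause",
                                              "SELECT ... -- update clause (insert part)",
                                              "SELECT ... -- delete clause");
        StringBuilder sql = new StringBuilder();
        int subQueryCount = 0;
        for (String branch : branches) {
          if (++subQueryCount > 1) {
            sql.append("union all\n");    // separator before every branch after the first
          }
          sql.append(branch).append('\n');
        }
        System.out.println(sql);
      }
    }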
sqlGenerator.append("AND ").append(filePathCol); sqlGenerator.append(" IN ( select ").append(filePathCol).append(" from t )"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/CopyOnWriteUpdateRewriter.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/CopyOnWriteUpdateRewriter.java index bf88c1da3d47..5279f6361786 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/CopyOnWriteUpdateRewriter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/rewrite/CopyOnWriteUpdateRewriter.java @@ -116,7 +116,8 @@ public ParseUtils.ReparseResult rewrite(Context context, UpdateStatement updateB sqlGenerator.append(" from "); sqlGenerator.appendTargetTableName(); // Add the inverted where clause, since we want to hold the records which doesn't satisfy the condition. - sqlGenerator.append("\nwhere NOT (").append(whereClause).append(")"); + sqlGenerator.append("\nwhere "); + sqlGenerator.append("( NOT(%s) OR (%s) IS NULL )".replace("%s", whereClause)); sqlGenerator.append("\n").indent(); // Add the file path filter that matches the delete condition. sqlGenerator.append("AND ").append(filePathCol); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/type/ExprNodeDescExprFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/type/ExprNodeDescExprFactory.java index 40c5f6da1922..b27b98f5f7e5 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/type/ExprNodeDescExprFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/type/ExprNodeDescExprFactory.java @@ -113,30 +113,40 @@ protected boolean isExprInstance(Object o) { protected ExprNodeDesc toExpr(ColumnInfo colInfo, RowResolver rowResolver, int offset) throws SemanticException { ObjectInspector inspector = colInfo.getObjectInspector(); - if (inspector instanceof ConstantObjectInspector && inspector instanceof PrimitiveObjectInspector) { - return toPrimitiveConstDesc(colInfo, inspector); - } - if (inspector instanceof ConstantObjectInspector && inspector instanceof ListObjectInspector) { - ObjectInspector listElementOI = ((ListObjectInspector)inspector).getListElementObjectInspector(); - if (listElementOI instanceof PrimitiveObjectInspector) { - return toListConstDesc(colInfo, inspector, listElementOI); + if (inspector instanceof ConstantObjectInspector) { + if (inspector instanceof PrimitiveObjectInspector) { + return toPrimitiveConstDesc(colInfo, inspector); } - } - if (inspector instanceof ConstantObjectInspector && inspector instanceof MapObjectInspector) { - ObjectInspector keyOI = ((MapObjectInspector)inspector).getMapKeyObjectInspector(); - ObjectInspector valueOI = ((MapObjectInspector)inspector).getMapValueObjectInspector(); - if (keyOI instanceof PrimitiveObjectInspector && valueOI instanceof PrimitiveObjectInspector) { - return toMapConstDesc(colInfo, inspector, keyOI, valueOI); + + Object inputConstantValue = ((ConstantObjectInspector) inspector).getWritableConstantValue(); + if (inputConstantValue == null) { + return createExprNodeConstantDesc(colInfo, null); } - } - if (inspector instanceof ConstantObjectInspector && inspector instanceof StructObjectInspector) { - boolean allPrimitive = true; - List fields = ((StructObjectInspector)inspector).getAllStructFieldRefs(); - for (StructField field : fields) { - allPrimitive &= field.getFieldObjectInspector() instanceof PrimitiveObjectInspector; + + if (inspector instanceof ListObjectInspector) { + ObjectInspector listElementOI = ((ListObjectInspector) inspector).getListElementObjectInspector(); + if (listElementOI instanceof 
PrimitiveObjectInspector) { + PrimitiveObjectInspector poi = (PrimitiveObjectInspector) listElementOI; + return createExprNodeConstantDesc(colInfo, toListConstant((List) inputConstantValue, poi)); + } } - if (allPrimitive) { - return toStructConstDesc(colInfo, inspector, fields); + if (inspector instanceof MapObjectInspector) { + ObjectInspector keyOI = ((MapObjectInspector)inspector).getMapKeyObjectInspector(); + ObjectInspector valueOI = ((MapObjectInspector)inspector).getMapValueObjectInspector(); + if (keyOI instanceof PrimitiveObjectInspector && valueOI instanceof PrimitiveObjectInspector) { + return createExprNodeConstantDesc(colInfo, toMapConstant((Map) inputConstantValue, keyOI, valueOI)); + } + } + if (inspector instanceof StructObjectInspector) { + boolean allPrimitive = true; + List fields = ((StructObjectInspector)inspector).getAllStructFieldRefs(); + for (StructField field : fields) { + allPrimitive &= field.getFieldObjectInspector() instanceof PrimitiveObjectInspector; + } + if (allPrimitive) { + return createExprNodeConstantDesc(colInfo, toStructConstDesc( + (List) ((ConstantObjectInspector) inspector).getWritableConstantValue(), fields)); + } } } // non-constant or non-primitive constants @@ -145,6 +155,13 @@ protected ExprNodeDesc toExpr(ColumnInfo colInfo, RowResolver rowResolver, int o return column; } + private static ExprNodeConstantDesc createExprNodeConstantDesc(ColumnInfo colInfo, Object constantValue) { + ExprNodeConstantDesc constantExpr = new ExprNodeConstantDesc(colInfo.getType(), constantValue); + constantExpr.setFoldedFromCol(colInfo.getInternalName()); + constantExpr.setFoldedFromTab(colInfo.getTabAlias()); + return constantExpr; + } + private static ExprNodeConstantDesc toPrimitiveConstDesc(ColumnInfo colInfo, ObjectInspector inspector) { PrimitiveObjectInspector poi = (PrimitiveObjectInspector) inspector; Object constant = ((ConstantObjectInspector) inspector).getWritableConstantValue(); @@ -155,50 +172,33 @@ private static ExprNodeConstantDesc toPrimitiveConstDesc(ColumnInfo colInfo, Obj return constantExpr; } - private static ExprNodeConstantDesc toListConstDesc(ColumnInfo colInfo, ObjectInspector inspector, - ObjectInspector listElementOI) { - PrimitiveObjectInspector poi = (PrimitiveObjectInspector)listElementOI; - List values = (List)((ConstantObjectInspector) inspector).getWritableConstantValue(); - List constant = new ArrayList(); - for (Object o : values) { + private static List toListConstant(List constantValue, PrimitiveObjectInspector poi) { + List constant = new ArrayList<>(constantValue.size()); + for (Object o : constantValue) { constant.add(poi.getPrimitiveJavaObject(o)); } - - ExprNodeConstantDesc constantExpr = new ExprNodeConstantDesc(colInfo.getType(), constant); - constantExpr.setFoldedFromCol(colInfo.getInternalName()); - constantExpr.setFoldedFromTab(colInfo.getTabAlias()); - return constantExpr; + return constant; } - private static ExprNodeConstantDesc toMapConstDesc(ColumnInfo colInfo, ObjectInspector inspector, - ObjectInspector keyOI, ObjectInspector valueOI) { - PrimitiveObjectInspector keyPoi = (PrimitiveObjectInspector)keyOI; - PrimitiveObjectInspector valuePoi = (PrimitiveObjectInspector)valueOI; - Map values = (Map)((ConstantObjectInspector) inspector).getWritableConstantValue(); - Map constant = new LinkedHashMap(); - for (Map.Entry e : values.entrySet()) { + private static Map toMapConstant( + Map constantValue, ObjectInspector keyOI, ObjectInspector valueOI) { + PrimitiveObjectInspector keyPoi = (PrimitiveObjectInspector) 
keyOI; + PrimitiveObjectInspector valuePoi = (PrimitiveObjectInspector) valueOI; + Map constant = new LinkedHashMap<>(constantValue.size()); + for (Map.Entry e : constantValue.entrySet()) { constant.put(keyPoi.getPrimitiveJavaObject(e.getKey()), valuePoi.getPrimitiveJavaObject(e.getValue())); } - - ExprNodeConstantDesc constantExpr = new ExprNodeConstantDesc(colInfo.getType(), constant); - constantExpr.setFoldedFromCol(colInfo.getInternalName()); - constantExpr.setFoldedFromTab(colInfo.getTabAlias()); - return constantExpr; + return constant; } - private static ExprNodeConstantDesc toStructConstDesc(ColumnInfo colInfo, ObjectInspector inspector, - List fields) { - List values = (List)((ConstantObjectInspector) inspector).getWritableConstantValue(); - List constant = new ArrayList(); - for (int i = 0; i < values.size(); i++) { - Object value = values.get(i); + private static List toStructConstDesc(List constantValue, List fields) { + List constant = new ArrayList<>(constantValue.size()); + for (int i = 0; i < constantValue.size(); i++) { + Object value = constantValue.get(i); PrimitiveObjectInspector fieldPoi = (PrimitiveObjectInspector) fields.get(i).getFieldObjectInspector(); constant.add(fieldPoi.getPrimitiveJavaObject(value)); } - ExprNodeConstantDesc constantExpr = new ExprNodeConstantDesc(colInfo.getType(), constant); - constantExpr.setFoldedFromCol(colInfo.getInternalName()); - constantExpr.setFoldedFromTab(colInfo.getTabAlias()); - return constantExpr; + return constant; } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/type/RexNodeExprFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/type/RexNodeExprFactory.java index a57203878dda..ee8779188d33 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/type/RexNodeExprFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/type/RexNodeExprFactory.java @@ -165,22 +165,23 @@ protected RexNode createColumnRefExpr(ColumnInfo colInfo, List rowR private int getPosition(ColumnInfo colInfo, List rowResolverList) throws SemanticException { - ColumnInfo tmp; - ColumnInfo cInfoToRet = null; int position = 0; - for (RowResolver rr : rowResolverList) { - tmp = rr.get(colInfo.getTabAlias(), colInfo.getAlias()); - if (tmp != null) { - if (cInfoToRet != null) { - throw new CalciteSemanticException("Could not resolve column name"); - } - cInfoToRet = tmp; - position += rr.getPosition(cInfoToRet.getInternalName()); - } else if (cInfoToRet == null) { + + for (RowResolver rr: rowResolverList) { + ColumnInfo tmp = rr.get(colInfo.getTabAlias(), colInfo.getAlias()); + if (tmp == null) { + // if column is not present in the RR, increment position by size of RR position += rr.getColumnInfos().size(); + } else { + // if column is present, increment position by the position of the column in RR + // and return early. 
+ position += rr.getPosition(tmp.getInternalName()); + return position; } } - return position; + + // If we are out of the for loop, then the column is not present in any RR + throw new CalciteSemanticException("Could not resolve column name"); } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/type/TypeCheckProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/type/TypeCheckProcFactory.java index 9837b325230b..f8b50ca6c297 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/type/TypeCheckProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/type/TypeCheckProcFactory.java @@ -139,6 +139,9 @@ public class TypeCheckProcFactory { serdeConstants.INTERVAL_DAY_TIME_TYPE_NAME); CONVERSION_FUNCTION_TEXT_MAP.put(HiveParser.TOK_DECIMAL, serdeConstants.DECIMAL_TYPE_NAME); + CONVERSION_FUNCTION_TEXT_MAP.put(HiveParser.TOK_MAP, "toMap"); + CONVERSION_FUNCTION_TEXT_MAP.put(HiveParser.TOK_LIST, "toArray"); + CONVERSION_FUNCTION_TEXT_MAP.put(HiveParser.TOK_STRUCT, "toStruct"); WINDOWING_TOKENS = new HashSet(); WINDOWING_TOKENS.add(HiveParser.KW_OVER); @@ -995,19 +998,12 @@ protected T getXpathOrFuncExprNodeDesc(ASTNode node, // different value type. The reason is that Hive and Calcite treat // types in IN clauses differently and it is practically impossible // to find some correct implementation unless this is done. - boolean hasNullValue = false; ListMultimap expressions = ArrayListMultimap.create(); for (int i = 1; i < children.size(); i++) { T columnDesc = children.get(0); T valueDesc = interpretNode(columnDesc, children.get(i)); - if (valueDesc == null) { - // Keep original - TypeInfo targetType = exprFactory.getTypeInfo(children.get(i)); - if (!expressions.containsKey(targetType)) { - expressions.put(targetType, columnDesc); - } - expressions.put(targetType, children.get(i)); - } else { + if (valueDesc != null) { + // Only add to the expression map if types can be coerced TypeInfo targetType = exprFactory.getTypeInfo(valueDesc); if (!expressions.containsKey(targetType)) { expressions.put(targetType, columnDesc); @@ -1015,6 +1011,14 @@ protected T getXpathOrFuncExprNodeDesc(ASTNode node, expressions.put(targetType, valueDesc); } } + if(expressions.isEmpty()) { + // We will only hit this when none of the operands inside the "in" clause can be type-coerced + // That would imply that the result of "in" is a boolean "false" + // This should not impact those cases where the "in" clause is used on a boolean column and + // there is no operand in the "in" clause that cannot be type-coerced into boolean because + // in case of boolean, Hive does not allow such use cases and throws an error + return exprFactory.createBooleanConstantExpr("false"); + } children.clear(); List newExprs = new ArrayList<>(); @@ -1134,6 +1138,10 @@ private TypeInfo getTypeInfo(ASTNode funcNameNode) throws SemanticException { return timestampLocalTZTypeInfo; case HiveParser.TOK_DECIMAL: return ParseUtils.getDecimalTypeTypeInfo(funcNameNode); + case HiveParser.TOK_MAP: + case HiveParser.TOK_LIST: + case HiveParser.TOK_STRUCT: + return ParseUtils.getComplexTypeTypeInfo(funcNameNode); default: return null; } @@ -1409,7 +1417,9 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // Return nulls for conversion operators if (CONVERSION_FUNCTION_TEXT_MAP.keySet().contains(expr.getType()) || expr.getToken().getType() == HiveParser.CharSetName - || expr.getToken().getType() == HiveParser.CharSetLiteral) { + || expr.getToken().getType() == HiveParser.CharSetLiteral + || expr.getType() 
== HiveParser.TOK_TABCOL + || expr.getType() == HiveParser.TOK_TABCOLLIST) { return null; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java index c1b9b27ff481..59f40b203bae 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverCommonJoin.java @@ -169,7 +169,7 @@ protected Task resolveMapJoinTask( Map aliasToKnownSize = ctx.getAliasToKnownSize(); Map, Set> taskToAliases = ctx.getTaskToAliases(); - long threshold = HiveConf.getLongVar(conf, HiveConf.ConfVars.HIVESMALLTABLESFILESIZE); + long threshold = HiveConf.getLongVar(conf, HiveConf.ConfVars.HIVE_SMALL_TABLES_FILESIZE); Long bigTableSize = null; Long smallTablesSize = null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java index 0e6816ae4056..c5aecaa9cae1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java @@ -134,9 +134,9 @@ public List> getTasks(HiveConf conf, Object objCtx) { List> resTsks = new ArrayList>(); // check if a map-reduce job is needed to merge the files // If the current size is smaller than the target, merge - long trgtSize = conf.getLongVar(HiveConf.ConfVars.HIVEMERGEMAPFILESSIZE); + long trgtSize = conf.getLongVar(HiveConf.ConfVars.HIVE_MERGE_MAP_FILES_SIZE); long avgConditionSize = conf - .getLongVar(HiveConf.ConfVars.HIVEMERGEMAPFILESAVGSIZE); + .getLongVar(HiveConf.ConfVars.HIVE_MERGE_MAP_FILES_AVG_SIZE); trgtSize = Math.max(trgtSize, avgConditionSize); Task mvTask = ctx.getListTasks().get(0); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicPartitionCtx.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicPartitionCtx.java index 3497f3120cd4..580b4499217e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicPartitionCtx.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicPartitionCtx.java @@ -52,6 +52,7 @@ public class DynamicPartitionCtx implements Serializable { private String defaultPartName; // default partition name in case of null or empty value private int maxPartsPerNode; // maximum dynamic partitions created per mapper/reducer private Pattern whiteListPattern; + private boolean hasCustomSortExprs = false; /** * Expressions describing a custom way of sorting the table before write. Expressions can reference simple * column descriptions or a tree of expressions containing more columns and UDFs. 
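The TypeCheckProcFactory hunk above folds an IN predicate to a constant false when none of the listed values can be type-coerced to the column's type. The following is a minimal, self-contained sketch of that decision rule in plain Java; the class and method names (InClauseFoldingSketch, coerceToColumnType, foldInPredicate) are illustrative only and are not Hive APIs.

import java.util.ArrayList;
import java.util.List;

// Standalone sketch: keep only operands that coerce to the column's type; if nothing
// survives, the whole IN predicate collapses to the constant FALSE.
final class InClauseFoldingSketch {

  // Hypothetical coercion: here the "column type" is int, and any non-numeric literal fails,
  // mirroring interpretNode(...) returning null for an uncoercible operand.
  static Integer coerceToColumnType(Object literal) {
    try {
      return literal == null ? null : Integer.valueOf(literal.toString());
    } catch (NumberFormatException e) {
      return null;
    }
  }

  static Object foldInPredicate(List<Object> literals) {
    List<Integer> coerced = new ArrayList<>();
    for (Object literal : literals) {
      Integer value = coerceToColumnType(literal);
      if (value != null) {
        coerced.add(value); // only type-compatible operands stay in the IN list
      }
    }
    // No coercible operand at all => "col IN (...)" can never be true.
    return coerced.isEmpty() ? Boolean.FALSE : coerced;
  }
}

Under this reading, an IN list whose values cannot all be represented in the column's type is pruned, and an IN list with no representable values at all reduces directly to false rather than surviving with uncoercible operands.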
@@ -147,6 +148,7 @@ public DynamicPartitionCtx(DynamicPartitionCtx dp) { this.maxPartsPerNode = dp.maxPartsPerNode; this.whiteListPattern = dp.whiteListPattern; this.customSortExpressions = dp.customSortExpressions; + this.hasCustomSortExprs = dp.customSortExpressions != null && !dp.customSortExpressions.isEmpty(); this.customSortOrder = dp.customSortOrder; this.customSortNullOrder = dp.customSortNullOrder; } @@ -258,4 +260,12 @@ public List getCustomSortNullOrder() { public void setCustomSortNullOrder(List customSortNullOrder) { this.customSortNullOrder = customSortNullOrder; } + + public boolean hasCustomSortExprs() { + return hasCustomSortExprs; + } + + public void setHasCustomSortExprs(boolean hasCustomSortExprs) { + this.hasCustomSortExprs = hasCustomSortExprs; + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicValue.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicValue.java index 04129a6adaa5..033df4e9ceff 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicValue.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/DynamicValue.java @@ -24,10 +24,7 @@ import org.apache.hadoop.hive.ql.exec.DynamicValueRegistry; import org.apache.hadoop.hive.ql.exec.ObjectCache; import org.apache.hadoop.hive.ql.exec.ObjectCacheFactory; -import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.sarg.LiteralDelegate; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; @@ -113,7 +110,7 @@ public Object getValue() { try { // Get object cache - String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEQUERYID); + String queryId = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID); ObjectCache cache = ObjectCacheFactory.getCache(conf, queryId, false, true); if (cache == null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java index f5e3828e2cda..6709dc319e86 100755 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java @@ -173,6 +173,9 @@ public String getExprString() { if (typeInfo.getCategory() == Category.PRIMITIVE) { return getFormatted(typeInfo, value); } else if (typeInfo.getCategory() == Category.STRUCT) { + if (getWritableObjectInspector().getWritableConstantValue() == null) { + return getFormatted(typeInfo, value); + } StringBuilder sb = new StringBuilder(); sb.append("const struct("); List items = (List) getWritableObjectInspector().getWritableConstantValue(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java index 738ace050600..d5117fe768cf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.MemoryMonitorInfo; import org.apache.hadoop.hive.ql.exec.persistence.MapJoinObjectSerDeContext; -import org.apache.hadoop.hive.ql.optimizer.signature.Signature; import org.apache.hadoop.hive.ql.plan.Explain.Level; import org.apache.hadoop.hive.ql.plan.Explain.Vectorization; import 
org.apache.hadoop.hive.ql.plan.VectorMapJoinDesc.HashTableImplementationType; @@ -531,7 +530,7 @@ private VectorizationCondition[] createNativeConditions() { conditionList.add( new VectorizationCondition( vectorMapJoinDesc.getUseOptimizedTable(), - HiveConf.ConfVars.HIVEMAPJOINUSEOPTIMIZEDTABLE.varname)); + HiveConf.ConfVars.HIVE_MAPJOIN_USE_OPTIMIZED_TABLE.varname)); conditionList.add( new VectorizationCondition( enabled, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java index 61d3da32a3f6..188ec71a57ea 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java @@ -1026,7 +1026,7 @@ public static String stripQuotes(String val) { * @return */ public static String removePrefixFromWarehouseConfig(String origiKey) { - String prefix = SessionState.get().getConf().getVar(HiveConf.ConfVars.METASTOREWAREHOUSE); + String prefix = SessionState.get().getConf().getVar(HiveConf.ConfVars.METASTORE_WAREHOUSE); if ((prefix != null) && (prefix.length() > 0)) { //Local file system is using pfile:/// {@link ProxyLocalFileSystem} prefix = prefix.replace("pfile:///", "pfile:/"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java index b5fd1814c2d6..90a7ff727a10 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java @@ -405,7 +405,7 @@ private static void extractFinalCandidates(ExprNodeDesc expr, exprInfo.convertedExpr : expr); return; } else if (!FunctionRegistry.isOpAnd(expr) && - HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { ctx.addNonFinalCandidate(exprInfo != null ? 
exprInfo.alias : null, expr); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java index fa7f5710f6fd..01bb077daab7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java @@ -174,7 +174,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, OpWalkerInfo owi = (OpWalkerInfo) procCtx; ExprWalkerInfo childInfo = getChildWalkerInfo((Operator) nd, owi); if (childInfo != null && HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { ExprWalkerInfo unpushedPreds = mergeChildrenPred(nd, owi, null, false); return createFilter((Operator)nd, unpushedPreds, owi); } @@ -214,7 +214,7 @@ private void pushRankLimit(PTFOperator ptfOp, OpWalkerInfo owi) throws SemanticE return; } - float threshold = owi.getParseContext().getConf().getFloatVar(HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE); + float threshold = owi.getParseContext().getConf().getFloatVar(HiveConf.ConfVars.HIVE_LIMIT_PUSHDOWN_MEMORY_USAGE); if (threshold <= 0 || threshold >= 1) { return; } @@ -356,7 +356,7 @@ private void pushRankLimitToRedSink(PTFOperator ptfOp, HiveConf conf, int rLimit return; } - float threshold = conf.getFloatVar(HiveConf.ConfVars.HIVELIMITPUSHDOWNMEMORYUSAGE); + float threshold = conf.getFloatVar(HiveConf.ConfVars.HIVE_LIMIT_PUSHDOWN_MEMORY_USAGE); ReduceSinkOperator rSink = (ReduceSinkOperator) gP; ReduceSinkDesc rDesc = rSink.getConf(); @@ -392,7 +392,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object o = super.process(nd, stack, procCtx, nodeOutputs); OpWalkerInfo owi = (OpWalkerInfo) procCtx; if (HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { // The lateral view join is allowed to have a filter pushed through it. // We need to remove the filter candidate here once it has been applied. 
// If we do not remove it here, the candidates will be cleared out through @@ -440,7 +440,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, TableScanOperator tsOp = (TableScanOperator) nd; mergeWithChildrenPred(tsOp, owi, null, null); if (HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { // remove all the candidate filter operators // when we get to the TS removeAllCandidates(owi); @@ -495,7 +495,7 @@ Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, logExpr(nd, ewi); owi.putPrunedPreds((Operator) nd, ewi); if (HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { // add this filter for deletion, if it does not have non-final candidates owi.addCandidateFilterOp((FilterOperator)op); Map> residual = ewi.getResidualPredicates(true); @@ -505,7 +505,7 @@ Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // merge it with children predicates boolean hasUnpushedPredicates = mergeWithChildrenPred(nd, owi, ewi, null); if (HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { if (hasUnpushedPredicates) { ExprWalkerInfo unpushedPreds = mergeChildrenPred(nd, owi, null, false); return createFilter((Operator)nd, unpushedPreds, owi); @@ -611,7 +611,7 @@ protected Set getAliases(Node nd) throws SemanticException { protected Object handlePredicates(Node nd, ExprWalkerInfo prunePreds, OpWalkerInfo owi) throws SemanticException { if (HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { return createFilter((Operator)nd, prunePreds.getResidualPredicates(true), owi); } return null; @@ -685,7 +685,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, if (operator.getNumChild() == 1 && operator.getChildOperators().get(0) instanceof JoinOperator) { if (HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDRECOGNIZETRANSITIVITY)) { + HiveConf.ConfVars.HIVE_PPD_RECOGNIZE_TRANSITIVITY)) { JoinOperator child = (JoinOperator) operator.getChildOperators().get(0); int targetPos = child.getParentOperators().indexOf(operator); applyFilterTransitivity(child, targetPos, owi); @@ -724,7 +724,7 @@ private void applyFilterTransitivity(JoinOperator join, int targetPos, OpWalkerI ExprWalkerInfo rsPreds = owi.getPrunedPreds(target); boolean recogniseColumnEqualities = HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPD_RECOGNIZE_COLUMN_EQUALITIES); + HiveConf.ConfVars.HIVE_PPD_RECOGNIZE_COLUMN_EQUALITIES); for (int sourcePos = 0; sourcePos < parentOperators.size(); sourcePos++) { ReduceSinkOperator source = (ReduceSinkOperator) parentOperators.get(sourcePos); List sourceKeys = source.getConf().getKeyCols(); @@ -1095,7 +1095,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Set includes = getQualifiedAliases((Operator) nd, owi); boolean hasUnpushedPredicates = mergeWithChildrenPred(nd, owi, null, includes); if (hasUnpushedPredicates && HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { if (includes != null || nd instanceof 
ReduceSinkOperator) { owi.getCandidateFilterOps().clear(); } else { @@ -1257,7 +1257,7 @@ protected static Object createFilter(Operator op, boolean pushFilterToStorage; HiveConf hiveConf = owi.getParseContext().getConf(); pushFilterToStorage = - hiveConf.getBoolVar(HiveConf.ConfVars.HIVEOPTPPD_STORAGE); + hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_OPT_PPD_STORAGE); if (pushFilterToStorage) { condn = pushFilterToStorageHandler( (TableScanOperator) op, @@ -1288,7 +1288,7 @@ protected static Object createFilter(Operator op, } if (HiveConf.getBoolVar(owi.getParseContext().getConf(), - HiveConf.ConfVars.HIVEPPDREMOVEDUPLICATEFILTERS)) { + HiveConf.ConfVars.HIVE_PPD_REMOVE_DUPLICATE_FILTERS)) { // remove the candidate filter ops removeCandidates(op, owi); } @@ -1326,7 +1326,7 @@ private static ExprNodeGenericFuncDesc pushFilterToStorageHandler( TableScanDesc tableScanDesc = tableScanOp.getConf(); Table tbl = tableScanDesc.getTableMetadata(); - if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTINDEXFILTER)) { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPT_INDEX_FILTER)) { // attach the original predicate to the table scan operator for index // optimizations that require the pushed predicate before pcr & later // optimizations are applied diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java index fbaabe06f258..87547116fed1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/SetProcessor.java @@ -234,7 +234,7 @@ public Map getHiveVariable() { return ss.getHiveVariables(); } }).substitute(conf, varValue); - if (conf.getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) { + if (conf.getBoolVar(HiveConf.ConfVars.HIVE_CONF_VALIDATION)) { HiveConf.ConfVars confVars = HiveConf.getConfVars(key); if (confVars != null) { if (!confVars.isType(value)) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/reexec/ReExecDriver.java b/ql/src/java/org/apache/hadoop/hive/ql/reexec/ReExecDriver.java index 3061d0a762db..94ef3932aa85 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/reexec/ReExecDriver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/reexec/ReExecDriver.java @@ -194,6 +194,8 @@ public CommandProcessorResponse run() throws CommandProcessorException { boolean shouldReExecute = explainReOptimization && executionIndex==1; shouldReExecute |= cpr == null && plugins.stream().anyMatch(p -> p.shouldReExecute(executionIndex)); + LOG.info("Re-execution decision is made according to: executionIndex: {}, maxExecutions: {}, shouldReExecute: {}", + executionIndex, maxExecutions, shouldReExecute); if (executionIndex >= maxExecutions || !shouldReExecute) { if (cpr != null) { return cpr; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/reexec/ReExecuteLostAMQueryPlugin.java b/ql/src/java/org/apache/hadoop/hive/ql/reexec/ReExecuteLostAMQueryPlugin.java index ce400159be30..c3a6c8a363b8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/reexec/ReExecuteLostAMQueryPlugin.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/reexec/ReExecuteLostAMQueryPlugin.java @@ -19,12 +19,15 @@ package org.apache.hadoop.hive.ql.reexec; import org.apache.hadoop.hive.ql.Driver; +import org.apache.hadoop.hive.ql.exec.tez.TezRuntimeException; import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext; import org.apache.hadoop.hive.ql.hooks.HookContext; import org.apache.hadoop.hive.ql.plan.mapper.PlanMapper; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; +import java.util.HashSet; +import java.util.Set; import java.util.regex.Pattern; /** @@ -32,11 +35,21 @@ */ public class ReExecuteLostAMQueryPlugin implements IReExecutionPlugin { private static final Logger LOG = LoggerFactory.getLogger(ReExecuteLostAMQueryPlugin.class); - private boolean retryPossible; - // Lost am container have exit code -100, due to node failures. This pattern of exception is thrown when AM is managed - // by HS2. - private final Pattern lostAMContainerErrorPattern = Pattern.compile(".*AM Container for .* exited .* exitCode: -100.*"); + // Lost am container have exit code -100, due to node failures. This pattern of exception is thrown when + // AM is managed by HS2. + private static final Pattern LOST_AM_CONTAINER_ERROR_PATTERN = + Pattern.compile(".*AM Container for .* exited .* exitCode: -100.*"); + // When HS2 does not manage the AMs, tez AMs are registered with zookeeper and HS2 discovers it, + // failure of unmanaged AMs will throw AM record not being found in zookeeper. + private static final String UNMANAGED_AM_FAILURE = "AM record not found (likely died)"; + // DAG lost in the scenario described at TEZ-4543 + private static final String DAG_LOST_FAILURE = "No running DAG at present"; + + private boolean retryPossible; + // a list to track DAG ids seen by this re-execution plugin during the same query + // it can help a lot with identifying the previous DAGs in case of retries + private Set dagIds = new HashSet<>(); class LocalHook implements ExecuteWithHookContext { @Override @@ -44,15 +57,23 @@ public void run(HookContext hookContext) throws Exception { if (hookContext.getHookType() == HookContext.HookType.ON_FAILURE_HOOK) { Throwable exception = hookContext.getException(); - if (exception != null && exception.getMessage() != null) { - // When HS2 does not manage the AMs, tez AMs are registered with zookeeper and HS2 discovers it, - // failure of unmanaged AMs will throw AM record not being found in zookeeper. 
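The ReExecuteLostAMQueryPlugin changes in this hunk decide retryability by matching the failure message of a TezRuntimeException. Below is a small standalone sketch of that matching logic; the pattern and message fragments are copied from the diff, while the class and method names are illustrative and not Hive's.

import java.util.regex.Pattern;

// Sketch only: the constants mirror the ones introduced in the hunk above.
final class LostAmRetrySketch {
  private static final Pattern LOST_AM_CONTAINER_ERROR_PATTERN =
      Pattern.compile(".*AM Container for .* exited .* exitCode: -100.*");
  private static final String UNMANAGED_AM_FAILURE = "AM record not found (likely died)";
  private static final String DAG_LOST_FAILURE = "No running DAG at present";

  // A retry is considered possible when the message indicates a lost AM container,
  // a dead unmanaged AM, or a DAG that is no longer running.
  static boolean isRetryPossible(String message) {
    if (message == null) {
      return false;
    }
    return LOST_AM_CONTAINER_ERROR_PATTERN.matcher(message).matches()
        || message.contains(UNMANAGED_AM_FAILURE)
        || message.contains(DAG_LOST_FAILURE);
  }
}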
- String unmanagedAMFailure = "AM record not found (likely died)"; - if (lostAMContainerErrorPattern.matcher(exception.getMessage()).matches() - || exception.getMessage().contains(unmanagedAMFailure)) { + if (!(exception instanceof TezRuntimeException)) { + LOG.info("Exception is not a TezRuntimeException, no need to check further with ReExecuteLostAMQueryPlugin"); + return; + } + + TezRuntimeException tre = (TezRuntimeException)exception; + String message = tre.getMessage(); + if (message != null) { + dagIds.add(tre.getDagId()); + + if (LOST_AM_CONTAINER_ERROR_PATTERN.matcher(message).matches() + || message.contains(UNMANAGED_AM_FAILURE) + || message.contains(DAG_LOST_FAILURE)) { retryPossible = true; } - LOG.info("Got exception message: {} retryPossible: {}", exception.getMessage(), retryPossible); + LOG.info("Got exception message: {} retryPossible: {}, dags seen so far: {}", message, retryPossible, + dagIds); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java b/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java index d3e6a4722c27..cfa50d5e6369 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/scheduled/ScheduledQueryExecutionService.java @@ -227,7 +227,7 @@ private void processQuery(ScheduledQueryPollResponse q) { conf.set(Constants.SCHEDULED_QUERY_SCHEDULENAME, q.getScheduleKey().getScheduleName()); conf.set(Constants.SCHEDULED_QUERY_USER, q.getUser()); conf.set(Constants.SCHEDULED_QUERY_EXECUTIONID, Long.toString(q.getExecutionId())); - conf.unset(HiveConf.ConfVars.HIVESESSIONID.varname); + conf.unset(HiveConf.ConfVars.HIVE_SESSION_ID.varname); state = new SessionState(conf, q.getUser()); state.setIsHiveServerQuery(true); SessionState.start(state); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveCustomStorageHandlerUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveCustomStorageHandlerUtils.java index 8be4cfc5b8f8..2f2f7d781c00 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveCustomStorageHandlerUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/HiveCustomStorageHandlerUtils.java @@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.ql.Context; +import org.apache.hadoop.hive.ql.plan.DynamicPartitionCtx; import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.common.StatsSetupConst; @@ -31,6 +32,7 @@ public class HiveCustomStorageHandlerUtils { public static final String WRITE_OPERATION_CONFIG_PREFIX = "file.sink.write.operation."; + public static final String WRITE_OPERATION_IS_SORTED = "file.sink.write.operation.sorted."; public static String getTablePropsForCustomStorageHandler(Map tableProperties) { StringBuilder properties = new StringBuilder(); @@ -71,4 +73,21 @@ public static void setWriteOperation(Configuration conf, String tableName, Conte conf.set(WRITE_OPERATION_CONFIG_PREFIX + tableName, operation.name()); } + + public static void setWriteOperationIsSorted(Configuration conf, String tableName, boolean isSorted) { + if (conf == null || tableName == null) { + return; + } + + conf.set(WRITE_OPERATION_IS_SORTED + tableName, Boolean.toString(isSorted)); + } + + public static boolean getWriteOperationIsSorted(Configuration conf, String tableName) { + if (conf == null || tableName 
== null) { + return false; + } + + String isSortedString = conf.get(WRITE_OPERATION_IS_SORTED + tableName); + return Boolean.parseBoolean(isSortedString); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/command/CommandAuthorizerV2.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/command/CommandAuthorizerV2.java index 08e016223e4a..50fc7975da14 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/command/CommandAuthorizerV2.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/command/CommandAuthorizerV2.java @@ -233,8 +233,12 @@ private static void addHivePrivObject(Entity privObject, Map col = tableName2Cols == null ? null : + tableName2Cols.get(Table.getCompleteName(tbl.getDbName(), tbl.getTableName())); + hivePrivObject = new HivePrivilegeObject(privObjType, tbl.getDbName(), tbl.getTableName(), + null, col, actionType, null, null, tbl.getOwner(), tbl.getOwnerType()); + break; case SERVICE_NAME: hivePrivObject = new HivePrivilegeObject(privObjType, null, privObject.getServiceName(), null, null, actionType, null, null, null, null); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/fallback/FallbackHiveAuthorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/fallback/FallbackHiveAuthorizer.java index 71a0d22bd6b2..eac288bd88d2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/fallback/FallbackHiveAuthorizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/fallback/FallbackHiveAuthorizer.java @@ -221,15 +221,15 @@ public void applyAuthorizationConfigPolicy(HiveConf hiveConf) throws HiveAuthzPl if (sessionCtx.getClientType() == HiveAuthzSessionContext.CLIENT_TYPE.HIVESERVER2 && hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) { - // Configure PREEXECHOOKS with DisallowTransformHook to disallow transform queries - String hooks = hiveConf.getVar(HiveConf.ConfVars.PREEXECHOOKS).trim(); + // Configure PRE_EXEC_HOOKS with DisallowTransformHook to disallow transform queries + String hooks = hiveConf.getVar(HiveConf.ConfVars.PRE_EXEC_HOOKS).trim(); if (hooks.isEmpty()) { hooks = DisallowTransformHook.class.getName(); } else { hooks = hooks + "," + DisallowTransformHook.class.getName(); } LOG.debug("Configuring hooks : " + hooks); - hiveConf.setVar(HiveConf.ConfVars.PREEXECHOOKS, hooks); + hiveConf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, hooks); SettableConfigUpdater.setHiveConfWhiteList(hiveConf); String curBlackList = hiveConf.getVar(HiveConf.ConfVars.HIVE_SERVER2_BUILTIN_UDF_BLACKLIST); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java index 12ebba63acb4..f1203dc36610 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/SQLStdHiveAccessController.java @@ -628,15 +628,15 @@ public void applyAuthorizationConfigPolicy(HiveConf hiveConf) throws HiveAuthzPl if (sessionCtx.getClientType() == CLIENT_TYPE.HIVESERVER2 && hiveConf.getBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED)) { - // Configure PREEXECHOOKS with DisallowTransformHook to disallow transform queries - String hooks = hiveConf.getVar(ConfVars.PREEXECHOOKS).trim(); + // Configure PRE_EXEC_HOOKS 
with DisallowTransformHook to disallow transform queries + String hooks = hiveConf.getVar(ConfVars.PRE_EXEC_HOOKS).trim(); if (hooks.isEmpty()) { hooks = DisallowTransformHook.class.getName(); } else { hooks = hooks + "," + DisallowTransformHook.class.getName(); } LOG.debug("Configuring hooks : " + hooks); - hiveConf.setVar(ConfVars.PREEXECHOOKS, hooks); + hiveConf.setVar(ConfVars.PRE_EXEC_HOOKS, hooks); SettableConfigUpdater.setHiveConfWhiteList(hiveConf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java b/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java index 62105dcec5a6..305923023800 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/ClearDanglingScratchDir.java @@ -19,11 +19,9 @@ import java.io.File; import java.io.IOException; -import java.io.OutputStream; import java.util.ArrayList; import java.util.List; -import com.google.common.annotations.VisibleForTesting; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; @@ -101,7 +99,7 @@ public static void main(String[] args) throws Exception { if (cli.hasOption("s")) { rootHDFSDir = cli.getOptionValue("s"); } else { - rootHDFSDir = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR); + rootHDFSDir = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR); } ClearDanglingScratchDir clearDanglingScratchDirMain = new ClearDanglingScratchDir(dryRun, verbose, true, rootHDFSDir, conf); @@ -186,7 +184,7 @@ public void run() { return; } consoleMessage("Removing " + scratchDirToRemove.size() + " scratch directories"); - String localTmpDir = HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR); + String localTmpDir = HiveConf.getVar(conf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR); for (Path scratchDir : scratchDirToRemove) { if (dryRun) { System.out.println(scratchDir); @@ -254,17 +252,12 @@ static Options createOptions() { */ private void removeLocalTmpFiles(String sessionName, String localTmpdir) { File[] files = new File(localTmpdir).listFiles(fn -> fn.getName().startsWith(sessionName)); - boolean success; if (files != null) { for (File file : files) { - success = false; - if (file.canWrite()) { - success = file.delete(); - } - if (success) { + if (file.canWrite() && file.delete()) { consoleMessage("While removing '" + sessionName + "' dangling scratch dir from HDFS, " + "local tmp session file '" + file.getPath() + "' has been cleaned as well."); - } else if (file.getName().startsWith(sessionName)) { + } else { consoleMessage("Even though '" + sessionName + "' is marked as dangling session dir, " + "local tmp session file '" + file.getPath() + "' could not be removed."); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java index cb7ed48a58a5..533462fe5074 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java @@ -391,7 +391,7 @@ public void deleteTmpErrOutputFile() { public boolean getIsSilent() { if(sessionConf != null) { - return sessionConf.getBoolVar(HiveConf.ConfVars.HIVESESSIONSILENT); + return sessionConf.getBoolVar(HiveConf.ConfVars.HIVE_SESSION_SILENT); } else { return isSilent; } @@ -407,7 +407,7 @@ public boolean isHiveServerQuery() { public void setIsSilent(boolean isSilent) { if(sessionConf != null) { - 
sessionConf.setBoolVar(HiveConf.ConfVars.HIVESESSIONSILENT, isSilent); + sessionConf.setBoolVar(HiveConf.ConfVars.HIVE_SESSION_SILENT, isSilent); } this.isSilent = isSilent; } @@ -463,13 +463,13 @@ public SessionState(HiveConf conf, String userName, CleanupService cleanupServic if (LOG.isDebugEnabled()) { LOG.debug("SessionState user: " + userName); } - isSilent = conf.getBoolVar(HiveConf.ConfVars.HIVESESSIONSILENT); + isSilent = conf.getBoolVar(HiveConf.ConfVars.HIVE_SESSION_SILENT); resourceMaps = new ResourceMaps(); // Must be deterministic order map for consistent q-test output across Java versions overriddenConfigurations = new LinkedHashMap(); // if there isn't already a session name, go ahead and create it. - if (StringUtils.isEmpty(conf.getVar(HiveConf.ConfVars.HIVESESSIONID))) { - conf.setVar(HiveConf.ConfVars.HIVESESSIONID, makeSessionId()); + if (StringUtils.isEmpty(conf.getVar(HiveConf.ConfVars.HIVE_SESSION_ID))) { + conf.setVar(HiveConf.ConfVars.HIVE_SESSION_ID, makeSessionId()); getConsole().printInfo("Hive Session ID = " + getSessionId()); } // Using system classloader as the parent. Using thread context @@ -500,7 +500,7 @@ public void setHiveVariables(Map hiveVariables) { } public String getSessionId() { - return (sessionConf.getVar(HiveConf.ConfVars.HIVESESSIONID)); + return (sessionConf.getVar(HiveConf.ConfVars.HIVE_SESSION_ID)); } public void updateThreadName() { @@ -733,7 +733,9 @@ private static void start(SessionState startSs, boolean isAsync, LogHelper conso } String engine = HiveConf.getVar(startSs.getConf(), HiveConf.ConfVars.HIVE_EXECUTION_ENGINE); - if (!engine.equals("tez") || startSs.isHiveServerQuery) { + + if (!engine.equals("tez") || startSs.isHiveServerQuery + || !HiveConf.getBoolVar(startSs.getConf(), ConfVars.HIVE_CLI_TEZ_INITIALIZE_SESSION)) { return; } @@ -781,14 +783,14 @@ private void createSessionDirs(String userName) throws IOException { HiveConf conf = getConf(); Path rootHDFSDirPath = createRootHDFSDir(conf); // Now create session specific dirs - String scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIRPERMISSION); + String scratchDirPermission = HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR_PERMISSION); Path path; // 1. HDFS scratch dir path = new Path(rootHDFSDirPath, userName); hdfsScratchDirURIString = path.toUri().toString(); createPath(conf, path, scratchDirPermission, false, false); // 2. Local scratch dir - path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR)); + path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR)); createPath(conf, path, scratchDirPermission, true, false); // 3. Download resources dir path = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.DOWNLOADED_RESOURCES_DIR)); @@ -812,7 +814,7 @@ private void createSessionDirs(String userName) throws IOException { hdfsSessionPathLockFile = fs.create(new Path(hdfsSessionPath, LOCK_FILE_NAME), true); } // 6. Local session path - localSessionPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR), sessionId); + localSessionPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR), sessionId); createPath(conf, localSessionPath, scratchDirPermission, true, true); conf.set(LOCAL_SESSION_PATH_KEY, localSessionPath.toUri().toString()); // 7. 
HDFS temp table space @@ -837,7 +839,7 @@ private void createSessionDirs(String userName) throws IOException { * @throws IOException */ private Path createRootHDFSDir(HiveConf conf) throws IOException { - Path rootHDFSDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCHDIR)); + Path rootHDFSDirPath = new Path(HiveConf.getVar(conf, HiveConf.ConfVars.SCRATCH_DIR)); Utilities.ensurePathIsWritable(rootHDFSDirPath, conf); return rootHDFSDirPath; } @@ -1077,8 +1079,8 @@ public void setActiveAuthorizer(Object authorizer) { * @throws IOException */ private static File createTempFile(HiveConf conf) throws IOException { - String lScratchDir = HiveConf.getVar(conf, HiveConf.ConfVars.LOCALSCRATCHDIR); - String sessionID = conf.getVar(HiveConf.ConfVars.HIVESESSIONID); + String lScratchDir = HiveConf.getVar(conf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR); + String sessionID = conf.getVar(HiveConf.ConfVars.HIVE_SESSION_ID); return FileUtils.createTempFile(lScratchDir, sessionID, ".pipeout"); } @@ -1462,10 +1464,10 @@ public void loadAuxJars() throws IOException { public void loadReloadableAuxJars() throws IOException { LOG.info("Reloading auxiliary JAR files"); - final String renewableJarPath = sessionConf.getVar(ConfVars.HIVERELOADABLEJARS); + final String renewableJarPath = sessionConf.getVar(ConfVars.HIVE_RELOADABLE_JARS); // do nothing if this property is not specified or empty if (StringUtils.isBlank(renewableJarPath)) { - LOG.warn("Configuration {} not specified", ConfVars.HIVERELOADABLEJARS); + LOG.warn("Configuration {} not specified", ConfVars.HIVE_RELOADABLE_JARS); return; } @@ -2113,7 +2115,7 @@ public void setupQueryCurrentTimestamp() { // Provide a facility to set current timestamp during tests if (sessionConf.getBoolVar(ConfVars.HIVE_IN_TEST)) { String overrideTimestampString = - HiveConf.getVar(sessionConf, HiveConf.ConfVars.HIVETESTCURRENTTIMESTAMP, (String)null); + HiveConf.getVar(sessionConf, HiveConf.ConfVars.HIVE_TEST_CURRENT_TIMESTAMP, (String)null); if (overrideTimestampString != null && overrideTimestampString.length() > 0) { TimestampTZ zonedDateTime = TimestampTZUtil.convert( Timestamp.valueOf(overrideTimestampString), sessionConf.getLocalTimeZone()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionStateUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionStateUtil.java index f831f0920b66..0009a54c3a3c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionStateUtil.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionStateUtil.java @@ -31,6 +31,7 @@ public class SessionStateUtil { private static final Logger LOG = LoggerFactory.getLogger(SessionStateUtil.class); private static final String COMMIT_INFO_PREFIX = "COMMIT_INFO."; + public static final String DEFAULT_TABLE_LOCATION = "defaultLocation"; private SessionStateUtil() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java index 723f64f594c7..244d5cd0b710 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsNoJobTask.java @@ -231,7 +231,7 @@ public void run() { } ThreadPoolExecutor tpE = null; List> futures = null; - int numThreadsFactor = HiveConf.getIntVar(jc, HiveConf.ConfVars.BASICSTATSTASKSMAXTHREADSFACTOR); + int numThreadsFactor = HiveConf.getIntVar(jc, HiveConf.ConfVars.BASIC_STATS_TASKS_MAX_THREADS_FACTOR); if (fileList.size() > 1 && numThreadsFactor > 0) { int 
numThreads = Math.min(fileList.size(), numThreadsFactor * Runtime.getRuntime().availableProcessors()); ThreadFactory threadFactory = new ThreadFactoryBuilder().setNameFormat("Basic-Stats-Thread-%d").build(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java index ed968edce0cf..4abb77641fe7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/BasicStatsTask.java @@ -311,8 +311,9 @@ private int aggregateStats(Hive db, Table tbl) { if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) { console.printInfo("Table " + tableFullName + " stats: [" + toString(p.getPartParameters()) + ']'); + } else { + LOG.info("Table " + tableFullName + " stats: [" + toString(p.getPartParameters()) + ']'); } - LOG.info("Table " + tableFullName + " stats: [" + toString(p.getPartParameters()) + ']'); // The table object is assigned to the latest table object. // So that it can be used by ColStatsProcessor. @@ -377,8 +378,9 @@ public Void call() throws Exception { updates.add((Partition) res); if (conf.getBoolVar(ConfVars.TEZ_EXEC_SUMMARY)) { console.printInfo("Partition " + basicStatsProcessor.partish.getPartition().getSpec() + " stats: [" + toString(basicStatsProcessor.partish.getPartParameters()) + ']'); + } else { + LOG.info("Partition " + basicStatsProcessor.partish.getPartition().getSpec() + " stats: [" + toString(basicStatsProcessor.partish.getPartParameters()) + ']'); } - LOG.info("Partition " + basicStatsProcessor.partish.getPartition().getSpec() + " stats: [" + toString(basicStatsProcessor.partish.getPartParameters()) + ']'); } if (!updates.isEmpty()) { @@ -429,7 +431,7 @@ private ExecutorService buildBasicStatsExecutor() { } private StatsAggregator createStatsAggregator(StatsCollectionContext scc, HiveConf conf) throws HiveException { - String statsImpl = HiveConf.getVar(conf, HiveConf.ConfVars.HIVESTATSDBCLASS); + String statsImpl = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_STATS_DBCLASS); StatsFactory factory = StatsFactory.newFactory(statsImpl, conf); if (factory == null) { throw new HiveException(ErrorMsg.STATSPUBLISHER_NOT_OBTAINED.getErrorCodedMsg()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java index e2777a128bc4..285175414d62 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/ColStatsProcessor.java @@ -148,7 +148,7 @@ private boolean constructColumnStatsFromPackedRows(Table tbl, List op : baseWork.getAllOperators()) { String operatorId = op.getOperatorId(); OperatorStats operatorStats = null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java index 2e2abb6cb2fe..8d79f6a2c95f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsFactory.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.util.ReflectionUtils; -import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVESTATSDBCLASS; +import static org.apache.hadoop.hive.conf.HiveConf.ConfVars.HIVE_STATS_DBCLASS; /** * A factory of stats publisher and aggregator implementations of the @@ -43,7 +43,7 @@ public final class StatsFactory { private final Configuration jobConf; public static StatsFactory 
newFactory(Configuration conf) { - return newFactory(HiveConf.getVar(conf, HIVESTATSDBCLASS), conf); + return newFactory(HiveConf.getVar(conf, HIVE_STATS_DBCLASS), conf); } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java index 1ba231546b46..239f57b69b3e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java @@ -605,7 +605,7 @@ public static ColStatistics getColStatsForPartCol(ColumnInfo ci,PartitionIterabl partCS.setAvgColLen(StatsUtils.getAvgColLenOf(conf, ci.getObjectInspector(), partCS.getColumnType())); partCS.setRange(getRangePartitionColumn(partList, ci.getInternalName(), - ci.getType().getTypeName(), conf.getVar(ConfVars.DEFAULTPARTITIONNAME))); + ci.getType().getTypeName(), conf.getVar(ConfVars.DEFAULT_PARTITION_NAME))); return partCS; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java index ce822effe7b0..3493ea4d8da6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Cleaner.java @@ -89,7 +89,7 @@ public void run() { for (TaskHandler cleanupHandler : cleanupHandlers) { try { - checkInterrupt(); + CompactorUtil.checkInterrupt(CLASS_NAME); List tasks = cleanupHandler.getTasks(); List> asyncTasks = new ArrayList<>(); for (Runnable task : tasks) { @@ -112,7 +112,7 @@ public void run() { throw t; } } - checkInterrupt(); + CompactorUtil.checkInterrupt(CLASS_NAME); } catch (InterruptedException e) { Thread.currentThread().interrupt(); return; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactionHeartbeatService.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactionHeartbeatService.java index 788955e35c20..6ca74e4bf2fb 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactionHeartbeatService.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactionHeartbeatService.java @@ -40,7 +40,7 @@ /** * Singleton service responsible for heartbeating the compaction transactions. */ -class CompactionHeartbeatService { +public class CompactionHeartbeatService { private static final Logger LOG = LoggerFactory.getLogger(CompactionHeartbeatService.class); @@ -53,7 +53,7 @@ class CompactionHeartbeatService { * @return Returns the singleton {@link CompactionHeartbeatService} * @throws IllegalStateException Thrown when the service has already been destroyed. */ - static CompactionHeartbeatService getInstance(HiveConf conf) { + public static CompactionHeartbeatService getInstance(HiveConf conf) { if (instance == null) { synchronized (CompactionHeartbeatService.class) { if (instance == null) { @@ -82,7 +82,7 @@ static CompactionHeartbeatService getInstance(HiveConf conf) { * @param tableName Required for logging only * @throws IllegalStateException Thrown when the heartbeat for the given txn has already been started. */ - void startHeartbeat(long txnId, long lockId, String tableName) { + public void startHeartbeat(long txnId, long lockId, String tableName) { if (shuttingDown) { throw new IllegalStateException("Service is shutting down, starting new heartbeats is not possible!"); } @@ -101,7 +101,7 @@ void startHeartbeat(long txnId, long lockId, String tableName) { * @throws IllegalStateException Thrown when there is no {@link CompactionHeartbeater} task associated with the * given txnId. 
*/ - void stopHeartbeat(long txnId) throws InterruptedException { + public void stopHeartbeat(long txnId) throws InterruptedException { LOG.info("Stopping heartbeat task for TXN {}", txnId); CompactionHeartbeater heartbeater = tasks.get(txnId); if (heartbeater == null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorContext.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorContext.java index 0d36402e56e7..7d16d64db50d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorContext.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.ql.io.AcidDirectory; /** @@ -38,7 +38,7 @@ public class CompactorContext { private final CompactionInfo compactionInfo; private final AcidDirectory dir; - CompactorContext(HiveConf conf, Table table, Partition p, StorageDescriptor sd, ValidWriteIdList tblValidWriteIds, CompactionInfo ci, AcidDirectory dir) { + public CompactorContext(HiveConf conf, Table table, Partition p, StorageDescriptor sd, ValidWriteIdList tblValidWriteIds, CompactionInfo ci, AcidDirectory dir) { this.conf = conf; this.table = table; this.partition = p; @@ -48,6 +48,10 @@ public class CompactorContext { this.dir = dir; } + public CompactorContext(HiveConf conf, Table table, CompactionInfo ci) { + this(conf, table, null, null, null, ci, null); + } + public HiveConf getConf() { return conf; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorFactory.java index 786391c48e7b..84ce492ae610 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorFactory.java @@ -21,7 +21,9 @@ import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.slf4j.Logger; @@ -32,6 +34,7 @@ */ public final class CompactorFactory { private static final Logger LOG = LoggerFactory.getLogger(CompactorFactory.class.getName()); + private static final String ICEBERG_MAJOR_QUERY_COMPACTOR_CLASS = "org.apache.iceberg.mr.hive.compaction.IcebergMajorQueryCompactor"; private static final CompactorFactory INSTANCE = new CompactorFactory(); @@ -67,7 +70,7 @@ private CompactorFactory() { * @param compactionInfo provides insight about the type of compaction, must be not null. * @return {@link QueryCompactor} or null. 
*/ - public CompactorPipeline getCompactorPipeline(Table table, HiveConf configuration, CompactionInfo compactionInfo,IMetaStoreClient msc) + public CompactorPipeline getCompactorPipeline(Table table, HiveConf configuration, CompactionInfo compactionInfo, IMetaStoreClient msc) throws HiveException { if (AcidUtils.isFullAcidTable(table.getParameters())) { if (!"tez".equalsIgnoreCase(HiveConf.getVar(configuration, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE)) || @@ -108,6 +111,24 @@ public CompactorPipeline getCompactorPipeline(Table table, HiveConf configuratio throw new HiveException( compactionInfo.type.name() + " compaction is not supported on insert only tables."); } + } else if (MetaStoreUtils.isIcebergTable(table.getParameters())) { + switch (compactionInfo.type) { + case MAJOR: + + try { + Class icebergMajorQueryCompactor = (Class) + Class.forName(ICEBERG_MAJOR_QUERY_COMPACTOR_CLASS, true, + Utilities.getSessionSpecifiedClassLoader()); + + return new CompactorPipeline(icebergMajorQueryCompactor.newInstance()); + } + catch (Exception e) { + throw new HiveException("Failed instantiating and calling Iceberg compactor"); + } + default: + throw new HiveException( + compactionInfo.type.name() + " compaction is not supported on Iceberg tables."); + } } throw new HiveException("Only transactional tables can be compacted, " + table.getTableName() + "is not suitable " + "for compaction!"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java index f26e832e0886..09296293f0ac 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorThread.java @@ -23,28 +23,18 @@ import org.apache.hadoop.hive.common.ServerUtils; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.LockComponentBuilder; -import org.apache.hadoop.hive.metastore.LockRequestBuilder; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.DataOperationType; -import org.apache.hadoop.hive.metastore.api.LockRequest; -import org.apache.hadoop.hive.metastore.api.LockType; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; -import org.apache.hadoop.hive.ql.io.AcidDirectory; -import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; -import java.util.Arrays; import java.util.List; -import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; /** @@ -90,12 +80,6 @@ public void init(AtomicBoolean stop) throws Exception { this.runtimeVersion = getRuntimeVersion(); } - protected void checkInterrupt() throws InterruptedException { - if (Thread.interrupted()) { - throw new InterruptedException(getClass().getName() + " execution is interrupted."); - } - } - /** * Find the table being compacted * @param ci compaction info returned from the compaction queue @@ -121,67 +105,7 @@ protected void checkInterrupt() throws InterruptedException { * @throws MetaException if underlying calls throw, or if the partition name resolves to 
more than * one partition. */ - protected Partition resolvePartition(CompactionInfo ci) throws MetaException { - if (ci.partName != null) { - List parts; - try { - parts = getPartitionsByNames(ci); - if (parts == null || parts.size() == 0) { - // The partition got dropped before we went looking for it. - return null; - } - } catch (Exception e) { - LOG.error("Unable to find partition " + ci.getFullPartitionName(), e); - throw e; - } - if (parts.size() != 1) { - LOG.error(ci.getFullPartitionName() + " does not refer to a single partition. " + - Arrays.toString(parts.toArray())); - throw new MetaException("Too many partitions for : " + ci.getFullPartitionName()); - } - return parts.get(0); - } else { - return null; - } - } - - /** - * Check for that special case when minor compaction is supported or not. - *
<ul>
- *   <li>The table is Insert-only OR</li>
- *   <li>Query based compaction is not enabled OR</li>
- *   <li>The table has only acid data in it.</li>
- * </ul>
- * @param tblproperties The properties of the table to check - * @param dir The {@link AcidDirectory} instance pointing to the table's folder on the filesystem. - * @return Returns true if minor compaction is supported based on the given parameters, false otherwise. - */ - protected boolean isMinorCompactionSupported(Map tblproperties, AcidDirectory dir) { - //Query based Minor compaction is not possible for full acid tables having raw format (non-acid) data in them. - return AcidUtils.isInsertOnlyTable(tblproperties) || !conf.getBoolVar(HiveConf.ConfVars.COMPACTOR_CRUD_QUERY_BASED) - || !(dir.getOriginalFiles().size() > 0 || dir.getCurrentDirectories().stream().anyMatch(AcidUtils.ParsedDelta::isRawFormat)); - } - - /** - * Get the storage descriptor for a compaction. - * @param t table from {@link #resolveTable(org.apache.hadoop.hive.metastore.txn.CompactionInfo)} - * @param p table from {@link #resolvePartition(org.apache.hadoop.hive.metastore.txn.CompactionInfo)} - * @return metastore storage descriptor. - */ - protected StorageDescriptor resolveStorageDescriptor(Table t, Partition p) { - return (p == null) ? t.getSd() : p.getSd(); - } - - /** - * Determine whether to run this job as the current user or whether we need a doAs to switch - * users. - * @param owner of the directory we will be working in, as determined by - * {@link org.apache.hadoop.hive.metastore.txn.TxnUtils#findUserToRunAs(String, Table, Configuration)} - * @return true if the job should run as the current user, false if a doAs is needed. - */ - protected boolean runJobAsSelf(String owner) { - return (owner.equals(System.getProperty("user.name"))); - } + abstract protected Partition resolvePartition(CompactionInfo ci) throws MetaException; protected String tableName(Table t) { return Warehouse.getQualifiedName(t); @@ -209,29 +133,6 @@ protected String getRuntimeVersion() { return this.getClass().getPackage().getImplementationVersion(); } - protected LockRequest createLockRequest(CompactionInfo ci, long txnId, LockType lockType, DataOperationType opType) { - String agentInfo = Thread.currentThread().getName(); - LockRequestBuilder requestBuilder = new LockRequestBuilder(agentInfo); - requestBuilder.setUser(ci.runAs); - requestBuilder.setTransactionId(txnId); - - LockComponentBuilder lockCompBuilder = new LockComponentBuilder() - .setLock(lockType) - .setOperationType(opType) - .setDbName(ci.dbname) - .setTableName(ci.tableName) - .setIsTransactional(true); - - if (ci.partName != null) { - lockCompBuilder.setPartitionName(ci.partName); - } - requestBuilder.addLockComponent(lockCompBuilder.build()); - - requestBuilder.setZeroWaitReadEnabled(!conf.getBoolVar(HiveConf.ConfVars.TXN_OVERWRITE_X_LOCK) || - !conf.getBoolVar(HiveConf.ConfVars.TXN_WRITE_X_LOCK)); - return requestBuilder.build(); - } - protected void doPostLoopActions(long elapsedTime) throws InterruptedException { String threadTypeName = getClass().getName(); if (elapsedTime < checkInterval && !stop.get()) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorUtil.java index a8b45b0baec2..c8d86c7ea276 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorUtil.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorUtil.java @@ -20,16 +20,23 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import 
org.apache.hadoop.hive.metastore.LockComponentBuilder; +import org.apache.hadoop.hive.metastore.LockRequestBuilder; +import org.apache.hadoop.hive.metastore.api.DataOperationType; import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.LockRequest; +import org.apache.hadoop.hive.metastore.api.LockType; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.metastore.utils.StringableMap; import org.apache.hadoop.hive.ql.io.AcidDirectory; +import org.apache.hadoop.hive.ql.io.AcidUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -37,6 +44,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.concurrent.ExecutorService; import java.util.concurrent.ForkJoinPool; @@ -121,6 +129,10 @@ public static StorageDescriptor resolveStorageDescriptor(Table t, Partition p) { return (p == null) ? t.getSd() : p.getSd(); } + public static StorageDescriptor resolveStorageDescriptor(Table t) { + return resolveStorageDescriptor(t, null); + } + public static boolean isDynPartAbort(Table t, String partName) { return Optional.ofNullable(t).map(Table::getPartitionKeys).filter(pk -> !pk.isEmpty()).isPresent() && partName == null; @@ -188,4 +200,99 @@ public static List getObsoleteDirs(AcidDirectory dir, boolean isDynPartAbo } return obsoleteDirs; } + + public static Partition resolvePartition(HiveConf conf, IMetaStoreClient msc, String dbName, String tableName, + String partName, METADATA_FETCH_MODE fetchMode) throws MetaException { + if (partName != null) { + List parts = null; + try { + + switch (fetchMode) { + case LOCAL: parts = CompactorUtil.getPartitionsByNames(conf, dbName, tableName, partName); + break; + case REMOTE: parts = RemoteCompactorUtil.getPartitionsByNames(msc, dbName, tableName, partName); + break; + } + + if (parts == null || parts.size() == 0) { + // The partition got dropped before we went looking for it. + return null; + } + } catch (Exception e) { + LOG.error("Unable to find partition " + getFullPartitionName(dbName, tableName, partName), e); + throw e; + } + if (parts.size() != 1) { + LOG.error(getFullPartitionName(dbName, tableName, partName) + " does not refer to a single partition. 
" + + Arrays.toString(parts.toArray())); + throw new MetaException("Too many partitions for : " + getFullPartitionName(dbName, tableName, partName)); + } + return parts.get(0); + } else { + return null; + } + } + + public static String getFullPartitionName(String dbName, String tableName, String partName) { + StringBuilder buf = new StringBuilder(); + buf.append(dbName); + buf.append('.'); + buf.append(tableName); + if (partName != null) { + buf.append('.'); + buf.append(partName); + } + return buf.toString(); + } + + public enum METADATA_FETCH_MODE { + LOCAL, + REMOTE + } + + public static void checkInterrupt(String callerClassName) throws InterruptedException { + if (Thread.interrupted()) { + throw new InterruptedException(callerClassName + " execution is interrupted."); + } + } + + /** + * Check for that special case when minor compaction is supported or not. + *
<ul> + *   <li>The table is Insert-only OR</li> + *   <li>Query based compaction is not enabled OR</li> + *   <li>The table has only acid data in it.</li> + * </ul>
+ * @param tblproperties The properties of the table to check + * @param dir The {@link AcidDirectory} instance pointing to the table's folder on the filesystem. + * @return Returns true if minor compaction is supported based on the given parameters, false otherwise. + */ + public static boolean isMinorCompactionSupported(HiveConf conf, Map tblproperties, AcidDirectory dir) { + //Query based Minor compaction is not possible for full acid tables having raw format (non-acid) data in them. + return AcidUtils.isInsertOnlyTable(tblproperties) || !conf.getBoolVar(HiveConf.ConfVars.COMPACTOR_CRUD_QUERY_BASED) + || !(dir.getOriginalFiles().size() > 0 || dir.getCurrentDirectories().stream().anyMatch(AcidUtils.ParsedDelta::isRawFormat)); + } + + public static LockRequest createLockRequest(HiveConf conf, CompactionInfo ci, long txnId, LockType lockType, DataOperationType opType) { + String agentInfo = Thread.currentThread().getName(); + LockRequestBuilder requestBuilder = new LockRequestBuilder(agentInfo); + requestBuilder.setUser(ci.runAs); + requestBuilder.setTransactionId(txnId); + + LockComponentBuilder lockCompBuilder = new LockComponentBuilder() + .setLock(lockType) + .setOperationType(opType) + .setDbName(ci.dbname) + .setTableName(ci.tableName) + .setIsTransactional(true); + + if (ci.partName != null) { + lockCompBuilder.setPartitionName(ci.partName); + } + requestBuilder.addLockComponent(lockCompBuilder.build()); + + requestBuilder.setZeroWaitReadEnabled(!conf.getBoolVar(HiveConf.ConfVars.TXN_OVERWRITE_X_LOCK) || + !conf.getBoolVar(HiveConf.ConfVars.TXN_WRITE_X_LOCK)); + return requestBuilder.build(); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java index bb48c8f219bf..58cb478bbe82 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Initiator.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; import org.apache.hadoop.hive.metastore.metrics.PerfLogger; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; @@ -107,7 +107,7 @@ public void run() { final ShowCompactResponse currentCompactions = txnHandler.showCompact(new ShowCompactRequest()); - checkInterrupt(); + CompactorUtil.checkInterrupt(CLASS_NAME); // Currently we invalidate all entries after each cycle, because the bootstrap replication is marked via // table property hive.repl.first.inc.pending which would be cached. 
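The helpers above become plain statics on CompactorUtil, so initiator- and worker-side code can share one implementation. A minimal sketch of how they might compose, assuming an already-constructed HiveConf, IMetaStoreClient, CompactionInfo and AcidDirectory; the class and method names below are illustrative only and not part of the patch:

import java.util.Map;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.CompactionType;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
import org.apache.hadoop.hive.ql.io.AcidDirectory;
import org.apache.hadoop.hive.ql.txn.compactor.CompactorUtil;

class CompactorUtilUsageSketch {
  // Resolve the target partition, then choose a compaction type (delta/base thresholds elided).
  static CompactionType chooseType(HiveConf conf, IMetaStoreClient msc, CompactionInfo ci,
      Map<String, String> tblProperties, AcidDirectory dir) throws MetaException {
    // REMOTE fetches partition metadata through the metastore client; LOCAL reads it in-process.
    Partition p = CompactorUtil.resolvePartition(conf, msc, ci.dbname, ci.tableName, ci.partName,
        CompactorUtil.METADATA_FETCH_MODE.REMOTE);
    if (p == null && ci.partName != null) {
      return null; // the partition was dropped before the request was processed
    }
    // Query-based minor compaction is not possible while raw (non-ACID) files are still present.
    return CompactorUtil.isMinorCompactionSupported(conf, tblProperties, dir)
        ? CompactionType.MINOR : CompactionType.MAJOR;
  }
}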
@@ -122,7 +122,7 @@ public void run() { .collect(Collectors.toSet())).get(); LOG.debug("Found {} potential compactions, checking to see if we should compact any of them", potentials.size()); - checkInterrupt(); + CompactorUtil.checkInterrupt(CLASS_NAME); Map tblNameOwners = new HashMap<>(); List> compactionList = new ArrayList<>(); @@ -234,7 +234,7 @@ protected String resolveUserToRunAs(Map cache, Table t, Partitio throws IOException, InterruptedException { //Figure out who we should run the file operations as String fullTableName = TxnUtils.getFullTableName(t.getDbName(), t.getTableName()); - StorageDescriptor sd = resolveStorageDescriptor(t, p); + StorageDescriptor sd = CompactorUtil.resolveStorageDescriptor(t, p); String user = cache.get(fullTableName); if (user == null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/InitiatorBase.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/InitiatorBase.java index 14dd2ebffe02..8f632dbd398c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/InitiatorBase.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/InitiatorBase.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; import org.apache.hadoop.hive.metastore.api.CompactionRequest; import org.apache.hadoop.hive.metastore.api.CompactionResponse; import org.apache.hadoop.hive.metastore.api.CompactionType; @@ -40,10 +39,9 @@ import org.apache.hadoop.hive.metastore.metrics.AcidMetricService; import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils; import org.apache.hadoop.hive.metastore.txn.TxnUtils; -import org.apache.hadoop.hive.metastore.utils.JavaUtils; import org.apache.hadoop.hive.ql.io.AcidDirectory; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.shims.HadoopShims; @@ -68,7 +66,7 @@ private List initiateCompactionForMultiplePartitions(Table t List compactionResponses = new ArrayList<>(); partitions.entrySet().parallelStream().forEach(entry -> { try { - StorageDescriptor sd = resolveStorageDescriptor(table, entry.getValue()); + StorageDescriptor sd = CompactorUtil.resolveStorageDescriptor(table, entry.getValue()); String runAs = TxnUtils.findUserToRunAs(sd.getLocation(), table, conf); CompactionInfo ci = new CompactionInfo(table.getDbName(), table.getTableName(), entry.getKey(), request.getType()); @@ -195,7 +193,7 @@ private CompactionType determineCompactionType(CompactionInfo ci, AcidDirectory LOG.debug("Found " + deltas.size() + " delta files, and " + (noBase ? "no" : "has") + " base," + "requesting " + (noBase ? "major" : "minor") + " compaction"); - return noBase || !isMinorCompactionSupported(tblProperties, dir) ? CompactionType.MAJOR : CompactionType.MINOR; + return noBase || !CompactorUtil.isMinorCompactionSupported(conf, tblProperties, dir) ? 
CompactionType.MAJOR : CompactionType.MINOR; } private long getBaseSize(AcidDirectory dir) throws IOException { @@ -243,7 +241,7 @@ private CompactionType checkForCompaction(final CompactionInfo ci, final ValidWr AcidMetricService.updateMetricsFromInitiator(ci.dbname, ci.tableName, ci.partName, conf, txnHandler, baseSize, deltaSizes, acidDirectory.getObsolete()); - if (runJobAsSelf(runAs)) { + if (CompactorUtil.runJobAsSelf(runAs)) { return determineCompactionType(ci, acidDirectory, tblProperties, baseSize, deltaSize); } else { LOG.info("Going to initiate as user " + runAs + " for " + ci.getFullPartitionName()); @@ -279,11 +277,11 @@ private ValidWriteIdList resolveValidWriteIds(Table t) protected CompactionResponse scheduleCompactionIfRequired(CompactionInfo ci, Table t, Partition p, String runAs, boolean metricsEnabled) throws MetaException { - StorageDescriptor sd = resolveStorageDescriptor(t, p); + StorageDescriptor sd = CompactorUtil.resolveStorageDescriptor(t, p); try { ValidWriteIdList validWriteIds = resolveValidWriteIds(t); - checkInterrupt(); + CompactorUtil.checkInterrupt(InitiatorBase.class.getName()); CompactionType type = checkForCompaction(ci, validWriteIds, sd, t.getParameters(), runAs); if (type != null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MRCompactor.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MRCompactor.java index f9044fa3d296..3443ea204558 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MRCompactor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MRCompactor.java @@ -47,7 +47,7 @@ import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.utils.StringableMap; import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter; import org.apache.hadoop.hive.ql.io.AcidInputFormat; @@ -425,7 +425,7 @@ private void setColumnTypes(JobConf job, List cols) { } job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS, colNames.toString()); job.set(IOConstants.SCHEMA_EVOLUTION_COLUMNS_TYPES, colTypes.toString()); - HiveConf.setVar(job, HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); + HiveConf.setVar(job, HiveConf.ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); } public JobConf getMrJob() { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MergeCompactor.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MergeCompactor.java index 295ebe564178..7e5a3608e462 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MergeCompactor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MergeCompactor.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.ql.io.AcidDirectory; import org.apache.hadoop.hive.ql.io.AcidOutputFormat; import org.apache.hadoop.hive.ql.io.AcidUtils; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MetaStoreCompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MetaStoreCompactorThread.java index 
39d0e10f589a..0878330cc3e7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MetaStoreCompactorThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/MetaStoreCompactorThread.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.metrics.Metrics; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; @@ -88,6 +88,11 @@ public void init(AtomicBoolean stop) throws Exception { return CompactorUtil.getPartitionsByNames(conf, ci.dbname, ci.tableName, ci.partName); } + protected Partition resolvePartition(CompactionInfo ci) throws MetaException { + return CompactorUtil.resolvePartition(conf, null, ci.dbname, ci.tableName, ci.partName, + CompactorUtil.METADATA_FETCH_MODE.LOCAL); + } + protected abstract boolean isCacheEnabled(); protected void startCycleUpdater(long updateInterval, Runnable taskToRun) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/QueryCompactor.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/QueryCompactor.java index 01b24404beb7..384a386dfb6b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/QueryCompactor.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/QueryCompactor.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.utils.StringableMap; import org.apache.hadoop.hive.ql.DriverUtils; import org.apache.hadoop.hive.ql.io.AcidDirectory; @@ -49,7 +49,7 @@ /** * Common interface for query based compactions. */ -abstract class QueryCompactor implements Compactor { +public abstract class QueryCompactor implements Compactor { private static final Logger LOG = LoggerFactory.getLogger(QueryCompactor.class.getName()); private static final String COMPACTOR_PREFIX = "compactor."; @@ -68,6 +68,22 @@ abstract class QueryCompactor implements Compactor { protected void commitCompaction(String dest, String tmpTableName, HiveConf conf, ValidWriteIdList actualWriteIds, long compactorTxnId) throws IOException, HiveException {} + protected SessionState setupQueryCompactionSession(HiveConf conf, CompactionInfo compactionInfo, Map tblProperties) { + String queueName = HiveConf.getVar(conf, HiveConf.ConfVars.COMPACTOR_JOB_QUEUE); + if (queueName != null && queueName.length() > 0) { + conf.set(TezConfiguration.TEZ_QUEUE_NAME, queueName); + } + Util.disableLlapCaching(conf); + conf.set(HiveConf.ConfVars.HIVE_QUOTEDID_SUPPORT.varname, "column"); + conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS, true); + conf.setBoolVar(HiveConf.ConfVars.HIVE_HDFS_ENCRYPTION_SHIM_CACHE_ON, false); + Util.overrideConfProps(conf, compactionInfo, tblProperties); + String user = compactionInfo.runAs; + SessionState sessionState = DriverUtils.setUpSessionState(conf, user, true); + sessionState.setCompaction(true); + return sessionState; + } + /** * Run all the queries which performs the compaction. * @param conf hive configuration, must be not null. 
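With QueryCompactor made public and the session wiring pulled into a protected setupQueryCompactionSession hook, other query-based compactors can presumably reuse the same bootstrap before issuing their own statements. A minimal sketch under that assumption; the subclass below is hypothetical and leaves the remaining abstract surface unimplemented:

import java.util.Map;
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.ql.txn.compactor.QueryCompactor;

// Hypothetical query-based compactor variant; only the session bootstrap is shown.
abstract class CustomQueryCompactorSketch extends QueryCompactor {
  protected SessionState openCompactionSession(HiveConf conf, CompactionInfo ci,
      Map<String, String> tblProperties) {
    // The hook routes the job to the configured compactor queue, disables LLAP caching,
    // enforces quoted identifiers and doAs, applies per-table overrides, and returns a
    // SessionState already flagged as a compaction session.
    return setupQueryCompactionSession(conf, ci, tblProperties);
  }
}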
@@ -86,18 +102,7 @@ void runCompactionQueries(HiveConf conf, String tmpTableName, StorageDescriptor ValidWriteIdList writeIds, CompactionInfo compactionInfo, List resultDirs, List createQueries, List compactionQueries, List dropQueries, Map tblProperties) throws IOException { - String queueName = HiveConf.getVar(conf, HiveConf.ConfVars.COMPACTOR_JOB_QUEUE); - if (queueName != null && queueName.length() > 0) { - conf.set(TezConfiguration.TEZ_QUEUE_NAME, queueName); - } - Util.disableLlapCaching(conf); - conf.set(HiveConf.ConfVars.HIVE_QUOTEDID_SUPPORT.varname, "column"); - conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS, true); - conf.setBoolVar(HiveConf.ConfVars.HIVE_HDFS_ENCRYPTION_SHIM_CACHE_ON, false); - Util.overrideConfProps(conf, compactionInfo, tblProperties); - String user = compactionInfo.runAs; - SessionState sessionState = DriverUtils.setUpSessionState(conf, user, true); - sessionState.setCompaction(true); + SessionState sessionState = setupQueryCompactionSession(conf, compactionInfo, tblProperties); long compactorTxnId = Compactor.getCompactorTxnId(conf); try { for (String query : createQueries) { @@ -170,7 +175,7 @@ private void removeResultDirs(List resultDirPaths, HiveConf conf) throws I /** * Collection of some helper functions. */ - static class Util { + public static class Util { /** * Get the path of the base, delta, or delete delta directory that will be the final @@ -186,8 +191,8 @@ static class Util { * * @return Path of new base/delta/delete delta directory */ - static Path getCompactionResultDir(StorageDescriptor sd, ValidWriteIdList writeIds, HiveConf conf, - boolean writingBase, boolean createDeleteDelta, boolean bucket0, AcidDirectory directory) { + public static Path getCompactionResultDir(StorageDescriptor sd, ValidWriteIdList writeIds, HiveConf conf, + boolean writingBase, boolean createDeleteDelta, boolean bucket0, AcidDirectory directory) { long minWriteID = writingBase ? 
1 : getMinWriteID(directory); long highWatermark = writeIds.getHighWatermark(); long compactorTxnId = Compactor.getCompactorTxnId(conf); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/RemoteCompactorThread.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/RemoteCompactorThread.java index 9055a1a67163..f95834ac23d6 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/RemoteCompactorThread.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/RemoteCompactorThread.java @@ -20,20 +20,17 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.ql.exec.repl.util.ReplUtils; import org.apache.thrift.TException; -import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; -import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.convertToGetPartitionsByNamesRequest; import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; /** @@ -61,12 +58,7 @@ public void init(AtomicBoolean stop) throws Exception { } @Override Table resolveTable(CompactionInfo ci) throws MetaException { - try { - return msc.getTable(getDefaultCatalog(conf), ci.dbname, ci.tableName); - } catch (TException e) { - LOG.error("Unable to find table " + ci.getFullTableName(), e); - throw new MetaException(e.toString()); - } + return RemoteCompactorUtil.resolveTable(conf, msc, ci); } @Override boolean replIsCompactionDisabledForDatabase(String dbName) throws TException { @@ -81,13 +73,11 @@ public void init(AtomicBoolean stop) throws Exception { } @Override List getPartitionsByNames(CompactionInfo ci) throws MetaException { - try { - GetPartitionsByNamesRequest req = convertToGetPartitionsByNamesRequest(ci.dbname, ci.tableName, - Collections.singletonList(ci.partName)); - return msc.getPartitionsByNames(req).getPartitions(); - } catch (TException e) { - LOG.error("Unable to get partitions by name for CompactionInfo=" + ci); - throw new MetaException(e.toString()); - } + return RemoteCompactorUtil.getPartitionsByNames(msc, ci.dbname, ci.tableName, ci.tableName); + } + + protected Partition resolvePartition(CompactionInfo ci) throws MetaException { + return CompactorUtil.resolvePartition(conf, msc, ci.dbname, ci.tableName, ci.partName, + CompactorUtil.METADATA_FETCH_MODE.REMOTE); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/RemoteCompactorUtil.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/RemoteCompactorUtil.java new file mode 100644 index 000000000000..c7f20a93605a --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/RemoteCompactorUtil.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.txn.compactor; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; +import org.apache.thrift.TException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Collections; +import java.util.List; + +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.convertToGetPartitionsByNamesRequest; +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; + +public class RemoteCompactorUtil { + + static final private String CLASS_NAME = RemoteCompactorUtil.class.getName(); + protected static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME); + + public static Table resolveTable(HiveConf conf, IMetaStoreClient msc, CompactionInfo ci) throws MetaException { + try { + return msc.getTable(getDefaultCatalog(conf), ci.dbname, ci.tableName); + } catch (TException e) { + LOG.error("Unable to find table " + ci.getFullTableName(), e); + throw new MetaException(e.toString()); + } + } + + public static List getPartitionsByNames(IMetaStoreClient msc, String dbName, String tableName, + String partName) throws MetaException { + try { + GetPartitionsByNamesRequest req = convertToGetPartitionsByNamesRequest(dbName, tableName, + Collections.singletonList(partName)); + return msc.getPartitionsByNames(req).getPartitions(); + } catch (TException e) { + LOG.error("Unable to get partitions by name = {}.{}.{}", dbName, tableName, partName); + throw new MetaException(e.toString()); + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/StatsUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/StatsUpdater.java index 50d04f8b1b29..c5b7b52b2a9b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/StatsUpdater.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/StatsUpdater.java @@ -21,8 +21,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.CompactionType; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.ql.DriverUtils; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.stats.StatsUtils; @@ -78,7 +77,7 @@ public void gatherStats(CompactionInfo ci, HiveConf conf, sb.append(")"); } sb.append(" compute statistics"); - if (!conf.getBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER) && ci.isMajorCompaction()) { + if 
(!conf.getBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER) && ci.isMajorCompaction()) { List columnList = msc.findColumnsWithStats(CompactionInfo.compactionInfoToStruct(ci)); if (!columnList.isEmpty()) { sb.append(" for columns ").append(String.join(",", columnList)); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index 459b7d2bbd57..b39236ef8b9a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -21,48 +21,33 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.ValidCompactorWriteIdList; -import org.apache.hadoop.hive.common.ValidTxnList; -import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.MetaStoreThread; -import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; -import org.apache.hadoop.hive.metastore.api.CompactionType; -import org.apache.hadoop.hive.metastore.api.DataOperationType; import org.apache.hadoop.hive.metastore.api.FindNextCompactRequest; -import org.apache.hadoop.hive.metastore.api.LockRequest; -import org.apache.hadoop.hive.metastore.api.LockType; -import org.apache.hadoop.hive.metastore.api.LockResponse; -import org.apache.hadoop.hive.metastore.api.LockState; import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TxnType; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.metrics.AcidMetricService; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg; -import org.apache.hadoop.hive.metastore.txn.TxnStatus; +import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus; import org.apache.hadoop.hive.ql.io.AcidDirectory; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.log.PerfLogger; -import org.apache.hive.common.util.Ref; +import org.apache.hadoop.hive.ql.txn.compactor.service.CompactionService; +import org.apache.hadoop.hive.ql.txn.compactor.service.CompactionExecutorFactory; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.security.UserGroupInformation; import java.io.IOException; import java.security.PrivilegedExceptionAction; -import java.util.Collections; -import java.util.Map; +import java.util.Optional; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -70,7 +55,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; /** * A class to do compactions. 
This will run in a separate thread. It will spin on the @@ -170,66 +154,6 @@ public void init(AtomicBoolean stop) throws Exception { setName(workerName); } - /** - * Determine if compaction can run in a specified directory. - * @param ci {@link CompactionInfo} - * @param dir the delta directory - * @param sd resolved storage descriptor - * @return true, if compaction can run. - */ - static boolean isEnoughToCompact(CompactionInfo ci, AcidDirectory dir, StorageDescriptor sd) { - int deltaCount = dir.getCurrentDirectories().size(); - int origCount = dir.getOriginalFiles().size(); - - StringBuilder deltaInfo = new StringBuilder().append(deltaCount); - boolean isEnoughToCompact; - - if (ci.isRebalanceCompaction()) { - //TODO: For now, we are allowing rebalance compaction regardless of the table state. Thresholds will be added later. - return true; - } else if (ci.isMajorCompaction()) { - isEnoughToCompact = - (origCount > 0 || deltaCount + (dir.getBaseDirectory() == null ? 0 : 1) > 1); - - } else { - isEnoughToCompact = (deltaCount > 1); - - if (deltaCount == 2) { - Map deltaByType = dir.getCurrentDirectories().stream().collect(Collectors - .groupingBy(delta -> (delta - .isDeleteDelta() ? AcidUtils.DELETE_DELTA_PREFIX : AcidUtils.DELTA_PREFIX), - Collectors.counting())); - - isEnoughToCompact = (deltaByType.size() != deltaCount); - deltaInfo.append(" ").append(deltaByType); - } - } - - if (!isEnoughToCompact) { - LOG.info("Not enough files in {} to compact; current base: {}, delta files: {}, originals: {}", - sd.getLocation(), dir.getBaseDirectory(), deltaInfo, origCount); - } - return isEnoughToCompact; - } - - /** - * Check for obsolete directories, and return true if any exist and Cleaner should be - * run. For example if we insert overwrite into a table with only deltas, a new base file with - * the highest writeId is created so there will be no live delta directories, only obsolete - * ones. Compaction is not needed, but the cleaner should still be run. - * - * @return true if cleaning is needed - */ - public static boolean needsCleaning(AcidDirectory dir, StorageDescriptor sd) { - int numObsoleteDirs = dir.getObsolete().size() + dir.getAbortedDirectories().size(); - boolean needsJustCleaning = numObsoleteDirs > 0; - if (needsJustCleaning) { - LOG.info("{} obsolete directories in {} found; marked for cleaning.", numObsoleteDirs, - sd.getLocation()); - } - return needsJustCleaning; - } - /** * Creates a single threaded executor used for handling timeouts. * The thread settings are inherited from the current thread. @@ -258,10 +182,10 @@ protected Boolean findNextCompactionAndExecute(boolean collectGenericStats, bool // so wrap it in a big catch Throwable statement. PerfLogger perfLogger = SessionState.getPerfLogger(false); String workerMetric = null; - CompactionInfo ci = null; - boolean computeStats = false; Table table = null; + CompactionService compactionService = null; + boolean compactionResult = false; // If an exception is thrown in the try-with-resources block below, msc is closed and nulled, so a new instance // is need to be obtained here. 
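The worker logic removed above resurfaces behind a service layer: as the following hunks show, the Worker now obtains a CompactionService from CompactionExecutorFactory and delegates the compaction to it. A condensed sketch of that flow, with the run loop, impersonated cleanup and stats gathering elided; the wrapper class and method names are illustrative only:

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.Table;
import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
import org.apache.hadoop.hive.ql.txn.compactor.CompactorFactory;
import org.apache.hadoop.hive.ql.txn.compactor.service.CompactionExecutorFactory;
import org.apache.hadoop.hive.ql.txn.compactor.service.CompactionService;

class WorkerFlowSketch {
  static Boolean compactOnce(HiveConf conf, IMetaStoreClient msc, CompactorFactory compactorFactory,
      Table table, CompactionInfo ci) throws Exception {
    // Picks the concrete service (e.g. AcidCompactionService) for this table and stats settings.
    CompactionService service = CompactionExecutorFactory.createExecutor(
        conf, msc, compactorFactory, table, /* collectGenericStats */ true, /* collectMrStats */ false);
    try {
      // Runs the txn/lock/compact pipeline and reports whether the request was actually compacted.
      return service.compact(table, ci);
    } catch (Exception e) {
      // On failure the service knows which result directories it created and removes them.
      service.cleanupResultDirs(ci);
      throw e;
    }
    // A real caller would also consult service.isComputeStats() before gathering column statistics.
  }
}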
@@ -274,7 +198,7 @@ protected Boolean findNextCompactionAndExecute(boolean collectGenericStats, bool } } - try (CompactionTxn compactionTxn = new CompactionTxn()) { + try { FindNextCompactRequest findNextCompactRequest = new FindNextCompactRequest(); findNextCompactRequest.setWorkerId(workerName); @@ -297,7 +221,7 @@ protected Boolean findNextCompactionAndExecute(boolean collectGenericStats, bool LOG.warn("The returned compaction request ({}) belong to a different pool. Although the worker is assigned to the {} pool," + " it will process the request.", ci, getPoolName()); } - checkInterrupt(); + CompactorUtil.checkInterrupt(CLASS_NAME); if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.METASTORE_ACIDMETRICS_EXT_ON)) { workerMetric = MetricsConstants.COMPACTION_WORKER_CYCLE + "_" + @@ -321,158 +245,27 @@ protected Boolean findNextCompactionAndExecute(boolean collectGenericStats, bool return false; } - if (ci.isRebalanceCompaction() && table.getSd().getNumBuckets() > 0) { - LOG.error("Cannot execute rebalancing compaction on bucketed tables."); - ci.errorMessage = "Cannot execute rebalancing compaction on bucketed tables."; - msc.markRefused(CompactionInfo.compactionInfoToStruct(ci)); - return false; - } - - if (!ci.type.equals(CompactionType.REBALANCE) && ci.numberOfBuckets > 0) { - if (LOG.isWarnEnabled()) { - LOG.warn("Only the REBALANCE compaction accepts the number of buckets clause (CLUSTERED INTO {N} BUCKETS). " + - "Since the compaction request is {}, it will be ignored.", ci.type); - } - } - - checkInterrupt(); - - String fullTableName = TxnUtils.getFullTableName(table.getDbName(), table.getTableName()); - - - // Find the partition we will be working with, if there is one. - Partition p; - try { - p = resolvePartition(ci); - if (p == null && ci.partName != null) { - ci.errorMessage = "Unable to find partition " + ci.getFullPartitionName() + ", assuming it was dropped and moving on."; - LOG.warn(ci.errorMessage + " Compaction info: {}", ci); - msc.markRefused(CompactionInfo.compactionInfoToStruct(ci)); - return false; - } - } catch (Exception e) { - LOG.error("Unexpected error during resolving partition.", e); - ci.errorMessage = e.getMessage(); - msc.markFailed(CompactionInfo.compactionInfoToStruct(ci)); - return false; - } - - checkInterrupt(); - - // Find the appropriate storage descriptor - final StorageDescriptor sd = resolveStorageDescriptor(table, p); - - // Check that the table or partition isn't sorted, as we don't yet support that. - if (sd.getSortCols() != null && !sd.getSortCols().isEmpty()) { - ci.errorMessage = "Attempt to compact sorted table " + ci.getFullTableName() + ", which is not yet supported!"; - LOG.warn(ci.errorMessage + " Compaction info: {}", ci); - msc.markRefused(CompactionInfo.compactionInfoToStruct(ci)); - return false; - } - - if (ci.runAs == null) { - ci.runAs = TxnUtils.findUserToRunAs(sd.getLocation(), table, conf); - } - - checkInterrupt(); - - /** - * we cannot have Worker use HiveTxnManager (which is on ThreadLocal) since - * then the Driver would already have the an open txn but then this txn would have - * multiple statements in it (for query based compactor) which is not supported (and since - * this case some of the statements are DDL, even in the future will not be allowed in a - * multi-stmt txn. 
{@link Driver#setCompactionWriteIds(ValidWriteIdList, long)} */ - compactionTxn.open(ci); - - ValidTxnList validTxnList = msc.getValidTxns(compactionTxn.getTxnId()); - //with this ValidWriteIdList is capped at whatever HWM validTxnList has - final ValidCompactorWriteIdList tblValidWriteIds = - TxnUtils.createValidCompactWriteIdList(msc.getValidWriteIds( - Collections.singletonList(fullTableName), validTxnList.writeToString()).get(0)); - LOG.debug("ValidCompactWriteIdList: " + tblValidWriteIds.writeToString()); - conf.set(ValidTxnList.VALID_TXNS_KEY, validTxnList.writeToString()); - - ci.highestWriteId = tblValidWriteIds.getHighWatermark(); - //this writes TXN_COMPONENTS to ensure that if compactorTxnId fails, we keep metadata about - //it until after any data written by it are physically removed - msc.updateCompactorState(CompactionInfo.compactionInfoToStruct(ci), compactionTxn.getTxnId()); - - checkInterrupt(); - - // Don't start compaction or cleaning if not necessary - if (isDynPartAbort(table, ci)) { - msc.markCompacted(CompactionInfo.compactionInfoToStruct(ci)); - compactionTxn.wasSuccessful(); - return false; - } - AcidDirectory dir = getAcidStateForWorker(ci, sd, tblValidWriteIds); - if (!isEnoughToCompact(ci, dir, sd)) { - if (needsCleaning(dir, sd)) { - msc.markCompacted(CompactionInfo.compactionInfoToStruct(ci)); - } else { - // do nothing - ci.errorMessage = "None of the compaction thresholds met, compaction request is refused!"; - LOG.debug(ci.errorMessage + " Compaction info: {}", ci); - msc.markRefused(CompactionInfo.compactionInfoToStruct(ci)); - } - compactionTxn.wasSuccessful(); - return false; - } - if (!ci.isMajorCompaction() && !isMinorCompactionSupported(table.getParameters(), dir)) { - ci.errorMessage = "Query based Minor compaction is not possible for full acid tables having raw format " + - "(non-acid) data in them."; - LOG.error(ci.errorMessage + " Compaction info: {}", ci); - try { - msc.markRefused(CompactionInfo.compactionInfoToStruct(ci)); - } catch (Throwable tr) { - LOG.error("Caught an exception while trying to mark compaction {} as failed: {}", ci, tr); - } - return false; - } - checkInterrupt(); + CompactorUtil.checkInterrupt(CLASS_NAME); + compactionService = CompactionExecutorFactory.createExecutor(conf, msc, compactorFactory, table, collectGenericStats, collectMrStats); try { - failCompactionIfSetForTest(); - - /* - First try to run compaction via HiveQL queries. - Compaction for MM tables happens here, or run compaction for Crud tables if query-based compaction is enabled. - todo Find a more generic approach to collecting files in the same logical bucket to compact within the same - task (currently we're using Tez split grouping). 
- */ - CompactorPipeline compactorPipeline = compactorFactory.getCompactorPipeline(table, conf, ci, msc); - computeStats = (compactorPipeline.isMRCompaction() && collectMrStats) || collectGenericStats; - - LOG.info("Starting " + ci.type.toString() + " compaction for " + ci.getFullPartitionName() + ", id:" + - ci.id + " in " + compactionTxn + " with compute stats set to " + computeStats); - - CompactorContext compactorContext = new CompactorContext(conf, table, p, sd, tblValidWriteIds, ci, dir); - compactorPipeline.execute(compactorContext); - - LOG.info("Completed " + ci.type.toString() + " compaction for " + ci.getFullPartitionName() + " in " - + compactionTxn + ", marking as compacted."); - msc.markCompacted(CompactionInfo.compactionInfoToStruct(ci)); - compactionTxn.wasSuccessful(); - - AcidMetricService.updateMetricsFromWorker(ci.dbname, ci.tableName, ci.partName, ci.type, - dir.getCurrentDirectories().size(), dir.getDeleteDeltas().size(), conf, msc); + compactionResult = compactionService.compact(table, ci); } catch (Throwable e) { LOG.error("Caught exception while trying to compact " + ci + ". Marking failed to avoid repeated failures", e); - final CompactionType ctype = ci.type; markFailed(ci, e.getMessage()); - computeStats = false; - - if (runJobAsSelf(ci.runAs)) { - cleanupResultDirs(sd, tblValidWriteIds, ctype, dir); + if (CompactorUtil.runJobAsSelf(ci.runAs)) { + compactionService.cleanupResultDirs(ci); } else { LOG.info("Cleaning as user " + ci.runAs); UserGroupInformation ugi = UserGroupInformation.createProxyUser(ci.runAs, UserGroupInformation.getLoginUser()); + CompactionService finalCompactionService = compactionService; + CompactionInfo finalCi = ci; ugi.doAs((PrivilegedExceptionAction) () -> { - cleanupResultDirs(sd, tblValidWriteIds, ctype, dir); + finalCompactionService.cleanupResultDirs(finalCi); return null; }); try { @@ -499,64 +292,12 @@ protected Boolean findNextCompactionAndExecute(boolean collectGenericStats, bool } } - if (computeStats) { - statsUpdater.gatherStats(ci, conf, runJobAsSelf(ci.runAs) ? ci.runAs : table.getOwner(), - CompactorUtil.getCompactorJobQueueName(conf, ci, table), msc); - } - return true; - } - - /** - * Just AcidUtils.getAcidState, but with impersonation if needed. - */ - private AcidDirectory getAcidStateForWorker(CompactionInfo ci, StorageDescriptor sd, - ValidCompactorWriteIdList tblValidWriteIds) throws IOException, InterruptedException { - if (runJobAsSelf(ci.runAs)) { - return AcidUtils.getAcidState(null, new Path(sd.getLocation()), conf, - tblValidWriteIds, Ref.from(false), true); + if (Optional.ofNullable(compactionService).map(CompactionService::isComputeStats).orElse(false)) { + statsUpdater.gatherStats(ci, conf, CompactorUtil.runJobAsSelf(ci.runAs) ? 
ci.runAs : table.getOwner(), + CompactorUtil.getCompactorJobQueueName(conf, ci, table), msc); } - UserGroupInformation ugi = UserGroupInformation.createProxyUser(ci.runAs, UserGroupInformation.getLoginUser()); - try { - return ugi.doAs((PrivilegedExceptionAction) () -> - AcidUtils.getAcidState(null, new Path(sd.getLocation()), conf, tblValidWriteIds, - Ref.from(false), true)); - } finally { - try { - FileSystem.closeAllForUGI(ugi); - } catch (IOException exception) { - LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + ci.getFullPartitionName(), - exception); - } - } - } - - private void cleanupResultDirs(StorageDescriptor sd, ValidWriteIdList writeIds, CompactionType ctype, AcidDirectory dir) { - // result directory for compactor to write new files - Path resultDir = QueryCompactor.Util.getCompactionResultDir(sd, writeIds, conf, - ctype == CompactionType.MAJOR, false, false, dir); - LOG.info("Deleting result directories created by the compactor:\n"); - try { - FileSystem fs = resultDir.getFileSystem(conf); - LOG.info(resultDir.toString()); - fs.delete(resultDir, true); - - if (ctype == CompactionType.MINOR) { - Path deleteDeltaDir = QueryCompactor.Util.getCompactionResultDir(sd, writeIds, conf, - false, true, false, dir); - - LOG.info(deleteDeltaDir.toString()); - fs.delete(deleteDeltaDir, true); - } - } catch (IOException ex) { - LOG.error("Caught exception while cleaning result directories:", ex); - } - } - - private void failCompactionIfSetForTest() { - if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && conf.getBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION)) { - throw new RuntimeException(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION.name() + "=true"); - } + return compactionResult; } private void markFailed(CompactionInfo ci, String errorMessage) { @@ -578,107 +319,10 @@ private void markFailed(CompactionInfo ci, String errorMessage) { } } - private static boolean isDynPartAbort(Table t, CompactionInfo ci) { - return t.getPartitionKeys() != null && t.getPartitionKeys().size() > 0 - && ci.partName == null; - } - private String getWorkerId() { StringBuilder name = new StringBuilder(this.hostName); name.append("-"); name.append(getId()); return name.toString(); } - - /** - * Keep track of the compaction's transaction and its operations. - */ - class CompactionTxn implements AutoCloseable { - private long txnId = 0; - private long lockId = 0; - - private TxnStatus status = TxnStatus.UNKNOWN; - private boolean successfulCompaction = false; - - /** - * Try to open a new txn. - * @throws TException - */ - void open(CompactionInfo ci) throws TException { - this.txnId = msc.openTxn(ci.runAs, ci.type == CompactionType.REBALANCE ? TxnType.REBALANCE_COMPACTION : TxnType.COMPACTION); - status = TxnStatus.OPEN; - - LockRequest lockRequest; - if (CompactionType.REBALANCE.equals(ci.type)) { - lockRequest = createLockRequest(ci, txnId, LockType.EXCL_WRITE, DataOperationType.UPDATE); - } else { - lockRequest = createLockRequest(ci, txnId, LockType.SHARED_READ, DataOperationType.SELECT); - } - LockResponse res = msc.lock(lockRequest); - if (res.getState() != LockState.ACQUIRED) { - throw new TException("Unable to acquire lock(s) on {" + ci.getFullPartitionName() - + "}, status {" + res.getState() + "}, reason {" + res.getErrorMessage() + "}"); - } - lockId = res.getLockid(); - CompactionHeartbeatService.getInstance(conf).startHeartbeat(txnId, lockId, TxnUtils.getFullTableName(ci.dbname, ci.tableName)); - } - - /** - * Mark compaction as successful. 
This means the txn will be committed; otherwise it will be aborted. - */ - void wasSuccessful() { - this.successfulCompaction = true; - } - - /** - * Commit or abort txn. - * @throws Exception - */ - @Override public void close() throws Exception { - if (status == TxnStatus.UNKNOWN) { - return; - } - try { - //the transaction is about to close, we can stop heartbeating regardless of it's state - CompactionHeartbeatService.getInstance(conf).stopHeartbeat(txnId); - } finally { - if (successfulCompaction) { - commit(); - } else { - abort(); - } - } - } - - long getTxnId() { - return txnId; - } - - @Override public String toString() { - return "txnId=" + txnId + ", lockId=" + lockId + " (TxnStatus: " + status + ")"; - } - - /** - * Commit the txn if open. - */ - private void commit() throws TException { - if (status == TxnStatus.OPEN) { - msc.commitTxn(txnId); - status = TxnStatus.COMMITTED; - } - } - - /** - * Abort the txn if open. - */ - private void abort() throws TException { - if (status == TxnStatus.OPEN) { - AbortTxnRequest abortTxnRequest = new AbortTxnRequest(txnId); - abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_COMPACTION_TXN.getErrorCode()); - msc.rollbackTxn(abortTxnRequest); - status = TxnStatus.ABORTED; - } - } - } - } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/handler/AbortedTxnCleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/handler/AbortedTxnCleaner.java index acd4519d0fe3..2314ce4d2e4b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/handler/AbortedTxnCleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/handler/AbortedTxnCleaner.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; import org.apache.hadoop.hive.metastore.metrics.PerfLogger; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.ql.txn.compactor.CompactorUtil; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/handler/CompactionCleaner.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/handler/CompactionCleaner.java index c2be268b6f9a..721e3ea2266e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/handler/CompactionCleaner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/handler/CompactionCleaner.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; import org.apache.hadoop.hive.metastore.metrics.PerfLogger; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/handler/TaskHandler.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/handler/TaskHandler.java index 8a105c0bc206..f4d0a5adc15f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/handler/TaskHandler.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/handler/TaskHandler.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; import 
org.apache.hadoop.hive.metastore.metrics.AcidMetricService; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.ql.io.AcidDirectory; @@ -46,7 +46,6 @@ import java.io.IOException; import java.util.Arrays; -import java.util.BitSet; import java.util.Collections; import java.util.List; import java.util.Map; @@ -88,27 +87,7 @@ protected Table resolveTable(String dbName, String tableName) throws MetaExcepti } protected Partition resolvePartition(String dbName, String tableName, String partName) throws MetaException { - if (partName != null) { - List parts; - try { - parts = CompactorUtil.getPartitionsByNames(conf, dbName, tableName, partName); - if (parts == null || parts.isEmpty()) { - // The partition got dropped before we went looking for it. - return null; - } - } catch (Exception e) { - LOG.error("Unable to find partition: {}.{}.{}", dbName, tableName, partName, e); - throw e; - } - if (parts.size() != 1) { - LOG.error("{}.{}.{} does not refer to a single partition. {}", dbName, tableName, partName, - Arrays.toString(parts.toArray())); - throw new MetaException(String.join("Too many partitions for : ", dbName, tableName, partName)); - } - return parts.get(0); - } else { - return null; - } + return CompactorUtil.resolvePartition(conf, null, dbName, tableName, partName, CompactorUtil.METADATA_FETCH_MODE.LOCAL); } protected ValidReaderWriteIdList getValidCleanerWriteIdList(CompactionInfo info, ValidTxnList validTxnList) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/AcidCompactionService.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/AcidCompactionService.java new file mode 100644 index 000000000000..b4143f9dd184 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/AcidCompactionService.java @@ -0,0 +1,420 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.txn.compactor.service; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.ValidCompactorWriteIdList; +import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.ValidWriteIdList; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; +import org.apache.hadoop.hive.metastore.api.CompactionType; +import org.apache.hadoop.hive.metastore.api.DataOperationType; +import org.apache.hadoop.hive.metastore.api.LockRequest; +import org.apache.hadoop.hive.metastore.api.LockResponse; +import org.apache.hadoop.hive.metastore.api.LockState; +import org.apache.hadoop.hive.metastore.api.LockType; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TxnType; +import org.apache.hadoop.hive.metastore.metrics.AcidMetricService; +import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus; +import org.apache.hadoop.hive.ql.Driver; +import org.apache.hadoop.hive.ql.io.AcidDirectory; +import org.apache.hadoop.hive.ql.io.AcidUtils; +import org.apache.hadoop.hive.ql.txn.compactor.CompactionHeartbeatService; +import org.apache.hadoop.hive.ql.txn.compactor.CompactorContext; +import org.apache.hadoop.hive.ql.txn.compactor.CompactorFactory; +import org.apache.hadoop.hive.ql.txn.compactor.CompactorPipeline; +import org.apache.hadoop.hive.ql.txn.compactor.CompactorUtil; +import org.apache.hadoop.hive.ql.txn.compactor.QueryCompactor; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hive.common.util.Ref; +import org.apache.thrift.TException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.security.PrivilegedExceptionAction; +import java.util.Collections; +import java.util.Map; +import java.util.stream.Collectors; + +public class AcidCompactionService extends CompactionService { + static final private String CLASS_NAME = AcidCompactionService.class.getName(); + static final private Logger LOG = LoggerFactory.getLogger(CLASS_NAME); + + private final boolean collectMrStats; + private StorageDescriptor sd; + private ValidCompactorWriteIdList tblValidWriteIds; + private AcidDirectory dir; + + public AcidCompactionService(HiveConf conf, IMetaStoreClient msc, CompactorFactory compactorFactory, + boolean collectGenericStats, boolean collectMrStats) { + super(conf, msc, compactorFactory, collectGenericStats); + this.collectMrStats = collectMrStats; + } + + /** + * Just AcidUtils.getAcidState, but with impersonation if needed. 
+ */ + private AcidDirectory getAcidStateForWorker(CompactionInfo ci, StorageDescriptor sd, + ValidCompactorWriteIdList tblValidWriteIds) throws IOException, InterruptedException { + if (CompactorUtil.runJobAsSelf(ci.runAs)) { + return AcidUtils.getAcidState(null, new Path(sd.getLocation()), conf, + tblValidWriteIds, Ref.from(false), true); + } + + UserGroupInformation ugi = UserGroupInformation.createProxyUser(ci.runAs, UserGroupInformation.getLoginUser()); + try { + return ugi.doAs((PrivilegedExceptionAction) () -> + AcidUtils.getAcidState(null, new Path(sd.getLocation()), conf, tblValidWriteIds, + Ref.from(false), true)); + } finally { + try { + FileSystem.closeAllForUGI(ugi); + } catch (IOException exception) { + LOG.error("Could not clean up file-system handles for UGI: " + ugi + " for " + ci.getFullPartitionName(), + exception); + } + } + } + + public void cleanupResultDirs(CompactionInfo ci) { + // result directory for compactor to write new files + Path resultDir = QueryCompactor.Util.getCompactionResultDir(sd, tblValidWriteIds, conf, + ci.type == CompactionType.MAJOR, false, false, dir); + LOG.info("Deleting result directories created by the compactor:\n"); + try { + FileSystem fs = resultDir.getFileSystem(conf); + LOG.info(resultDir.toString()); + fs.delete(resultDir, true); + + if (ci.type == CompactionType.MINOR) { + Path deleteDeltaDir = QueryCompactor.Util.getCompactionResultDir(sd, tblValidWriteIds, conf, + false, true, false, dir); + + LOG.info(deleteDeltaDir.toString()); + fs.delete(deleteDeltaDir, true); + } + } catch (IOException ex) { + LOG.error("Caught exception while cleaning result directories:", ex); + } + } + + public Boolean compact(Table table, CompactionInfo ci) throws Exception { + + try (CompactionTxn compactionTxn = new CompactionTxn()) { + + if (ci.isRebalanceCompaction() && table.getSd().getNumBuckets() > 0) { + LOG.error("Cannot execute rebalancing compaction on bucketed tables."); + ci.errorMessage = "Cannot execute rebalancing compaction on bucketed tables."; + msc.markRefused(CompactionInfo.compactionInfoToStruct(ci)); + return false; + } + + if (!ci.type.equals(CompactionType.REBALANCE) && ci.numberOfBuckets > 0) { + if (LOG.isWarnEnabled()) { + LOG.warn("Only the REBALANCE compaction accepts the number of buckets clause (CLUSTERED INTO {N} BUCKETS). " + + "Since the compaction request is {}, it will be ignored.", ci.type); + } + } + + String fullTableName = TxnUtils.getFullTableName(table.getDbName(), table.getTableName()); + + // Find the partition we will be working with, if there is one. 
+ Partition p; + try { + p = CompactorUtil.resolvePartition(conf, msc, ci.dbname, ci.tableName, ci.partName, + CompactorUtil.METADATA_FETCH_MODE.REMOTE); + if (p == null && ci.partName != null) { + ci.errorMessage = "Unable to find partition " + ci.getFullPartitionName() + ", assuming it was dropped and moving on."; + LOG.warn(ci.errorMessage + " Compaction info: {}", ci); + msc.markRefused(CompactionInfo.compactionInfoToStruct(ci)); + return false; + } + } catch (Exception e) { + LOG.error("Unexpected error during resolving partition.", e); + ci.errorMessage = e.getMessage(); + msc.markFailed(CompactionInfo.compactionInfoToStruct(ci)); + return false; + } + + CompactorUtil.checkInterrupt(CLASS_NAME); + + // Find the appropriate storage descriptor + sd = CompactorUtil.resolveStorageDescriptor(table, p); + + if (isTableSorted(sd, ci)) { + return false; + } + + if (ci.runAs == null) { + ci.runAs = TxnUtils.findUserToRunAs(sd.getLocation(), table, conf); + } + + CompactorUtil.checkInterrupt(CLASS_NAME); + + /** + * we cannot have Worker use HiveTxnManager (which is on ThreadLocal) since + * then the Driver would already have the an open txn but then this txn would have + * multiple statements in it (for query based compactor) which is not supported (and since + * this case some of the statements are DDL, even in the future will not be allowed in a + * multi-stmt txn. {@link Driver#setCompactionWriteIds(ValidWriteIdList, long)} */ + compactionTxn.open(ci); + + ValidTxnList validTxnList = msc.getValidTxns(compactionTxn.getTxnId()); + //with this ValidWriteIdList is capped at whatever HWM validTxnList has + tblValidWriteIds = TxnUtils.createValidCompactWriteIdList(msc.getValidWriteIds( + Collections.singletonList(fullTableName), validTxnList.writeToString()).get(0)); + LOG.debug("ValidCompactWriteIdList: " + tblValidWriteIds.writeToString()); + conf.set(ValidTxnList.VALID_TXNS_KEY, validTxnList.writeToString()); + + ci.highestWriteId = tblValidWriteIds.getHighWatermark(); + //this writes TXN_COMPONENTS to ensure that if compactorTxnId fails, we keep metadata about + //it until after any data written by it are physically removed + msc.updateCompactorState(CompactionInfo.compactionInfoToStruct(ci), compactionTxn.getTxnId()); + + CompactorUtil.checkInterrupt(CLASS_NAME); + + // Don't start compaction or cleaning if not necessary + if (isDynPartAbort(table, ci)) { + msc.markCompacted(CompactionInfo.compactionInfoToStruct(ci)); + compactionTxn.wasSuccessful(); + return false; + } + dir = getAcidStateForWorker(ci, sd, tblValidWriteIds); + if (!isEnoughToCompact(ci, dir, sd)) { + if (needsCleaning(dir, sd)) { + msc.markCompacted(CompactionInfo.compactionInfoToStruct(ci)); + } else { + // do nothing + ci.errorMessage = "None of the compaction thresholds met, compaction request is refused!"; + LOG.debug(ci.errorMessage + " Compaction info: {}", ci); + msc.markRefused(CompactionInfo.compactionInfoToStruct(ci)); + } + compactionTxn.wasSuccessful(); + return false; + } + if (!ci.isMajorCompaction() && !CompactorUtil.isMinorCompactionSupported(conf, table.getParameters(), dir)) { + ci.errorMessage = "Query based Minor compaction is not possible for full acid tables having raw format " + + "(non-acid) data in them."; + LOG.error(ci.errorMessage + " Compaction info: {}", ci); + try { + msc.markRefused(CompactionInfo.compactionInfoToStruct(ci)); + } catch (Throwable tr) { + LOG.error("Caught an exception while trying to mark compaction {} as failed: {}", ci, tr); + } + return false; + } + 
CompactorUtil.checkInterrupt(CLASS_NAME); + + try { + failCompactionIfSetForTest(); + + /* + First try to run compaction via HiveQL queries. + Compaction for MM tables happens here, or run compaction for Crud tables if query-based compaction is enabled. + todo Find a more generic approach to collecting files in the same logical bucket to compact within the same + task (currently we're using Tez split grouping). + */ + CompactorPipeline compactorPipeline = compactorFactory.getCompactorPipeline(table, conf, ci, msc); + computeStats = (compactorPipeline.isMRCompaction() && collectMrStats) || collectGenericStats; + + LOG.info("Starting " + ci.type.toString() + " compaction for " + ci.getFullPartitionName() + ", id:" + + ci.id + " in " + compactionTxn + " with compute stats set to " + computeStats); + + CompactorContext compactorContext = new CompactorContext(conf, table, p, sd, tblValidWriteIds, ci, dir); + compactorPipeline.execute(compactorContext); + + LOG.info("Completed " + ci.type.toString() + " compaction for " + ci.getFullPartitionName() + " in " + + compactionTxn + ", marking as compacted."); + msc.markCompacted(CompactionInfo.compactionInfoToStruct(ci)); + compactionTxn.wasSuccessful(); + + AcidMetricService.updateMetricsFromWorker(ci.dbname, ci.tableName, ci.partName, ci.type, + dir.getCurrentDirectories().size(), dir.getDeleteDeltas().size(), conf, msc); + } catch (Throwable e) { + computeStats = false; + throw e; + } + + return true; + } catch (Exception e) { + LOG.error("Caught exception in " + CLASS_NAME + " while trying to compact " + ci, e); + throw e; + } + } + + /** + * Determine if compaction can run in a specified directory. + * @param ci {@link CompactionInfo} + * @param dir the delta directory + * @param sd resolved storage descriptor + * @return true, if compaction can run. + */ + static boolean isEnoughToCompact(CompactionInfo ci, AcidDirectory dir, StorageDescriptor sd) { + int deltaCount = dir.getCurrentDirectories().size(); + int origCount = dir.getOriginalFiles().size(); + + StringBuilder deltaInfo = new StringBuilder().append(deltaCount); + boolean isEnoughToCompact; + + if (ci.isRebalanceCompaction()) { + //TODO: For now, we are allowing rebalance compaction regardless of the table state. Thresholds will be added later. + return true; + } else if (ci.isMajorCompaction()) { + isEnoughToCompact = + (origCount > 0 || deltaCount + (dir.getBaseDirectory() == null ? 0 : 1) > 1); + + } else { + isEnoughToCompact = (deltaCount > 1); + + if (deltaCount == 2) { + Map<String, Long> deltaByType = dir.getCurrentDirectories().stream().collect(Collectors + .groupingBy(delta -> (delta + .isDeleteDelta() ? AcidUtils.DELETE_DELTA_PREFIX : AcidUtils.DELTA_PREFIX), + Collectors.counting())); + + isEnoughToCompact = (deltaByType.size() != deltaCount); + deltaInfo.append(" ").append(deltaByType); + } + } + + if (!isEnoughToCompact) { + LOG.info("Not enough files in {} to compact; current base: {}, delta files: {}, originals: {}", + sd.getLocation(), dir.getBaseDirectory(), deltaInfo, origCount); + } + return isEnoughToCompact; + } + + /** + * Check for obsolete directories, and return true if any exist and Cleaner should be + * run. For example if we insert overwrite into a table with only deltas, a new base file with + * the highest writeId is created so there will be no live delta directories, only obsolete + * ones. Compaction is not needed, but the cleaner should still be run. 
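+ * Aborted directories left behind by rolled-back transactions are counted the same way: they
+ * require cleaning but do not by themselves justify a compaction.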
+ * + * @return true if cleaning is needed + */ + public static boolean needsCleaning(AcidDirectory dir, StorageDescriptor sd) { + int numObsoleteDirs = dir.getObsolete().size() + dir.getAbortedDirectories().size(); + boolean needsJustCleaning = numObsoleteDirs > 0; + if (needsJustCleaning) { + LOG.info("{} obsolete directories in {} found; marked for cleaning.", numObsoleteDirs, + sd.getLocation()); + } + return needsJustCleaning; + } + + /** + * Keep track of the compaction's transaction and its operations. + */ + class CompactionTxn implements AutoCloseable { + private long txnId = 0; + private long lockId = 0; + + private TxnStatus status = TxnStatus.UNKNOWN; + private boolean successfulCompaction = false; + + /** + * Try to open a new txn. + * @throws TException + */ + void open(CompactionInfo ci) throws TException { + this.txnId = msc.openTxn(ci.runAs, ci.type == CompactionType.REBALANCE ? TxnType.REBALANCE_COMPACTION : TxnType.COMPACTION); + status = TxnStatus.OPEN; + + LockRequest lockRequest; + if (CompactionType.REBALANCE.equals(ci.type)) { + lockRequest = CompactorUtil.createLockRequest(conf, ci, txnId, LockType.EXCL_WRITE, DataOperationType.UPDATE); + } else { + lockRequest = CompactorUtil.createLockRequest(conf, ci, txnId, LockType.SHARED_READ, DataOperationType.SELECT); + } + LockResponse res = msc.lock(lockRequest); + if (res.getState() != LockState.ACQUIRED) { + throw new TException("Unable to acquire lock(s) on {" + ci.getFullPartitionName() + + "}, status {" + res.getState() + "}, reason {" + res.getErrorMessage() + "}"); + } + lockId = res.getLockid(); + CompactionHeartbeatService.getInstance(conf).startHeartbeat(txnId, lockId, TxnUtils.getFullTableName(ci.dbname, ci.tableName)); + } + + /** + * Mark compaction as successful. This means the txn will be committed; otherwise it will be aborted. + */ + void wasSuccessful() { + this.successfulCompaction = true; + } + + /** + * Commit or abort txn. + * @throws Exception + */ + @Override public void close() throws Exception { + if (status == TxnStatus.UNKNOWN) { + return; + } + try { + //the transaction is about to close, we can stop heartbeating regardless of it's state + CompactionHeartbeatService.getInstance(conf).stopHeartbeat(txnId); + } finally { + if (successfulCompaction) { + commit(); + } else { + abort(); + } + } + } + + long getTxnId() { + return txnId; + } + + @Override public String toString() { + return "txnId=" + txnId + ", lockId=" + lockId + " (TxnStatus: " + status + ")"; + } + + /** + * Commit the txn if open. + */ + private void commit() throws TException { + if (status == TxnStatus.OPEN) { + msc.commitTxn(txnId); + status = TxnStatus.COMMITTED; + } + } + + /** + * Abort the txn if open. + */ + private void abort() throws TException { + if (status == TxnStatus.OPEN) { + AbortTxnRequest abortTxnRequest = new AbortTxnRequest(txnId); + abortTxnRequest.setErrorCode(TxnErrorMsg.ABORT_COMPACTION_TXN.getErrorCode()); + msc.rollbackTxn(abortTxnRequest); + status = TxnStatus.ABORTED; + } + } + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/CompactionExecutorFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/CompactionExecutorFactory.java new file mode 100644 index 000000000000..56536d9692b9 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/CompactionExecutorFactory.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.txn.compactor.service; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.txn.compactor.CompactorFactory; + +public class CompactionExecutorFactory { + + private static final String ICEBERG_COMPACTION_SERVICE_CLASS = "org.apache.iceberg.mr.hive.compaction.IcebergCompactionService"; + + public static CompactionService createExecutor(HiveConf conf, IMetaStoreClient msc, CompactorFactory compactorFactory, + Table table, boolean collectGenericStats, boolean collectMrStats) throws HiveException { + + CompactionService compactionService; + + if (MetaStoreUtils.isIcebergTable(table.getParameters())) { + + try { + Class<? extends CompactionService> icebergCompactionService = (Class<? extends CompactionService>) + Class.forName(ICEBERG_COMPACTION_SERVICE_CLASS, true, + Utilities.getSessionSpecifiedClassLoader()); + + compactionService = icebergCompactionService.newInstance(); + compactionService.init(conf, msc, compactorFactory, collectGenericStats); + } + catch (Exception e) { + throw new HiveException("Failed instantiating and calling Iceberg compaction executor", e); + } + } + else { + compactionService = new AcidCompactionService(conf, msc, compactorFactory, collectGenericStats, + collectMrStats); + } + + return compactionService; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/CompactionService.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/CompactionService.java new file mode 100644 index 000000000000..9d1885ad9c3a --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/service/CompactionService.java @@ -0,0 +1,86 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.txn.compactor.service; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; +import org.apache.hadoop.hive.ql.txn.compactor.CompactorFactory; +import org.apache.thrift.TException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +public abstract class CompactionService { + + static final private String CLASS_NAME = CompactionService.class.getName(); + static final private Logger LOG = LoggerFactory.getLogger(CLASS_NAME); + + protected IMetaStoreClient msc; + protected HiveConf conf; + protected CompactorFactory compactorFactory; + protected boolean collectGenericStats; + protected boolean computeStats = false; + + public CompactionService(HiveConf conf, IMetaStoreClient msc, CompactorFactory compactorFactory, + boolean collectGenericStats) { + init(conf, msc, compactorFactory, collectGenericStats); + } + + public CompactionService() { + } + + public void init(HiveConf conf, IMetaStoreClient msc, CompactorFactory compactorFactory, + boolean collectGenericStats) { + this.conf = conf; + this.msc = msc; + this.compactorFactory = compactorFactory; + this.collectGenericStats = collectGenericStats; + } + + public abstract Boolean compact(Table table, CompactionInfo ci) throws Exception; + abstract public void cleanupResultDirs(CompactionInfo ci); + + public boolean isComputeStats() { + return computeStats; + } + + protected boolean isDynPartAbort(Table t, CompactionInfo ci) { + return t.getPartitionKeys() != null && t.getPartitionKeys().size() > 0 + && ci.partName == null; + } + + protected void failCompactionIfSetForTest() { + if(conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST) && conf.getBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION)) { + throw new RuntimeException(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION.name() + "=true"); + } + } + + protected boolean isTableSorted(StorageDescriptor sd, CompactionInfo ci) throws TException { + // Check that the table or partition isn't sorted, as we don't yet support that. 
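+ // If it is sorted, the request is marked refused in the metastore and the caller skips compaction.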
+ if (sd.getSortCols() != null && !sd.getSortCols().isEmpty()) { + ci.errorMessage = "Attempt to compact sorted table " + ci.getFullTableName() + ", which is not yet supported!"; + LOG.warn(ci.errorMessage + " Compaction info: {}", ci); + msc.markRefused(CompactionInfo.compactionInfoToStruct(ci)); + return true; + } + return false; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/AbstractGenericUDFArrayBase.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/AbstractGenericUDFArrayBase.java index 22ad5b8aa6ee..a21cc4e5f96e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/AbstractGenericUDFArrayBase.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/AbstractGenericUDFArrayBase.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; /** * Abstract GenericUDF for array functions @@ -44,6 +45,8 @@ public abstract class AbstractGenericUDFArrayBase extends GenericUDF { transient ListObjectInspector arrayOI; transient ObjectInspector[] argumentOIs; + transient ObjectInspector arrayElementOI; + transient Converter converter; protected AbstractGenericUDFArrayBase(String functionName, int minArgCount, int maxArgCount, ObjectInspector.Category outputCategory) { @@ -67,6 +70,7 @@ public ObjectInspector initialize(ObjectInspector[] arguments) //return ObjectInspectors based on expected output type arrayOI = (ListObjectInspector) arguments[ARRAY_IDX]; argumentOIs = arguments; + arrayElementOI = arrayOI.getListElementObjectInspector(); if (outputCategory == ObjectInspector.Category.LIST) { return initListOI(arguments); } else { @@ -123,4 +127,14 @@ ObjectInspector initListOI(ObjectInspector[] arguments) { return ObjectInspectorFactory.getStandardListObjectInspector(initOI(arguments)); } + void checkValueAndListElementTypes(ObjectInspector arrayElementOI, String functionName, ObjectInspector valueOI, + int elementIndex) throws UDFArgumentTypeException { + // Check if list element and value are of same type + if (!ObjectInspectorUtils.compareTypes(arrayElementOI, valueOI)) { + throw new UDFArgumentTypeException(elementIndex, + String.format("%s type element is expected at function %s(array<%s>,%s), but %s is found", + arrayElementOI.getTypeName(), functionName, arrayElementOI.getTypeName(), + arrayElementOI.getTypeName(), valueOI.getTypeName())); + } + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFArrayPosition.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFArrayPosition.java new file mode 100644 index 000000000000..fadad42d9cc4 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFArrayPosition.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.udf.generic; + +import org.apache.hadoop.hive.ql.exec.Description; +import org.apache.hadoop.hive.ql.exec.UDFArgumentException; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.io.IntWritable; + +/** + * GenericUDFArrayPosition. + */ +@Description(name = "array_position", value = "_FUNC_(array, element) - Returns the position of the first occurrence of " + + "element in array. Array indexing starts at 1. If the element value is NULL, a NULL is returned.", extended = + "Example:\n" + " > SELECT _FUNC_(array(1, 2, 3,4,2), 2) FROM src;\n" + " 2") +public class GenericUDFArrayPosition extends AbstractGenericUDFArrayBase { + static final String FUNC_NAME = "ARRAY_POSITION"; + private static final int ELEMENT_IDX = 1; + + private transient ObjectInspector valueOI; + + public GenericUDFArrayPosition() { + super(FUNC_NAME, 2, 2, ObjectInspector.Category.PRIMITIVE); + } + + @Override + public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { + super.initialize(arguments); + valueOI = arguments[ELEMENT_IDX]; + checkValueAndListElementTypes(arrayElementOI, FUNC_NAME, valueOI,ELEMENT_IDX); + return PrimitiveObjectInspectorFactory.writableIntObjectInspector; + } + + @Override + public Object evaluate(DeferredObject[] arguments) throws HiveException { + Object array = arguments[ARRAY_IDX].get(); + Object value = arguments[ELEMENT_IDX].get(); + int arrayLength = arrayOI.getListLength(array); + if (arrayLength < 0 || value == null) { + return null; + } + + for (int index = 0; index < arrayLength; ++index) { + if (ObjectInspectorUtils.compare(value, valueOI, arrayOI.getListElement(array, index), arrayElementOI) == 0) { + return new IntWritable(index + 1); + } + } + return new IntWritable(0); + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseNwayCompare.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseNwayCompare.java index d71f04f91b82..cdc14286333e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseNwayCompare.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFBaseNwayCompare.java @@ -62,6 +62,7 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen TypeInfo commonInfo = TypeInfoUtils.getTypeInfoFromObjectInspector(arguments[0]); for (int i = 1; i < arguments.length; i++) { + checkArgPrimitive(arguments, i); PrimitiveTypeInfo currInfo = (PrimitiveTypeInfo) TypeInfoUtils.getTypeInfoFromObjectInspector(arguments[i]); commonInfo = FunctionRegistry.getCommonClassForComparison( diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEnforceConstraint.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEnforceConstraint.java index aa0059b1cf2b..82f15af47b4f 100644 --- 
a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEnforceConstraint.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEnforceConstraint.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; +import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; import org.apache.hadoop.hive.ql.exec.errors.DataConstraintViolationError; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -47,6 +48,10 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen + arguments.length); } + if (!(arguments[0] instanceof BooleanObjectInspector)) { + throw new UDFArgumentTypeException(0, + String.format("%s only takes BOOLEAN, got %s", getFuncName(), arguments[0].getTypeName())); + } boi = (BooleanObjectInspector) arguments[0]; return PrimitiveObjectInspectorFactory.writableBooleanObjectInspector; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEpochMilli.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEpochMilli.java index 58bd86d6456b..f17c909dc429 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEpochMilli.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFEpochMilli.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; +import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; @@ -49,8 +50,11 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen } if (arguments[0] instanceof TimestampObjectInspector) { tsOi = (TimestampObjectInspector) arguments[0]; - } else { + } else if (arguments[0] instanceof TimestampLocalTZObjectInspector) { tsWithLocalTzOi = (TimestampLocalTZObjectInspector) arguments[0]; + } else { + throw new UDFArgumentTypeException(0, + String.format("%s only takes TIMESTAMP, got %s", getFuncName(), arguments[0].getTypeName())); } return PrimitiveObjectInspectorFactory.writableLongObjectInspector; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFExceptionInVertex.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFExceptionInVertex.java index 55ed806081a9..0f12db5a6fdf 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFExceptionInVertex.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFExceptionInVertex.java @@ -76,27 +76,35 @@ public ObjectInspector initialize(ObjectInspector[] parameters) throws UDFArgume return PrimitiveObjectInspectorFactory.javaLongObjectInspector; } - public static String getVertexName(ObjectInspector[] parameters, int index) { - return ((WritableConstantStringObjectInspector) parameters[index]).getWritableConstantValue() - .toString(); + public static String getVertexName(ObjectInspector[] parameters, int index) throws UDFArgumentTypeException { + if (parameters[index] instanceof WritableConstantStringObjectInspector) { + return ((WritableConstantStringObjectInspector) parameters[index]).getWritableConstantValue() + 
.toString(); + } else { + throw new UDFArgumentTypeException(index, String.format( + "This argument takes only constant STRING, got %s", parameters[index].getTypeName())); + } } - public static String getTaskNumber(ObjectInspector[] parameters, int index) { + public static String getTaskNumber(ObjectInspector[] parameters, int index) throws UDFArgumentTypeException { return getExpressionAtIndex(parameters, index); } - public static String getTaskAttemptNumber(ObjectInspector[] parameters, int index) { + public static String getTaskAttemptNumber(ObjectInspector[] parameters, int index) throws UDFArgumentTypeException { return getExpressionAtIndex(parameters, index); } - private static String getExpressionAtIndex(ObjectInspector[] parameters, int index) { + private static String getExpressionAtIndex(ObjectInspector[] parameters, int index) throws UDFArgumentTypeException { if (parameters.length > index) { if (parameters[index] instanceof WritableConstantStringObjectInspector) { return ((WritableConstantStringObjectInspector) parameters[index]) .getWritableConstantValue().toString(); - } else { + } else if (parameters[index] instanceof WritableConstantIntObjectInspector) { return ((WritableConstantIntObjectInspector) parameters[index]).getWritableConstantValue() .toString(); + } else { + throw new UDFArgumentTypeException(index, String.format( + "This argument takes only constant STRING or INT, got %s", parameters[index].getTypeName())); } } else { return "*"; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFGrouping.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFGrouping.java index d11e51b3cbf0..649c4788b3ac 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFGrouping.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFGrouping.java @@ -71,6 +71,7 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen indices = new int[arguments.length - 1]; for (int i = 1; i < arguments.length; i++) { + checkArgPrimitive(arguments, i); PrimitiveObjectInspector arg2OI = (PrimitiveObjectInspector) arguments[i]; if (!(arg2OI instanceof ConstantObjectInspector)) { throw new UDFArgumentTypeException(i, "Must be a constant. 
Got: " + arg2OI.getClass().getSimpleName()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPFalse.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPFalse.java index 3d0cb9687bed..433222429e7d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPFalse.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPFalse.java @@ -39,6 +39,7 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen if (arguments.length != 1) { throw new UDFArgumentLengthException("Invalid number of arguments"); } + checkArgPrimitive(arguments, 0); conditionConverter = ObjectInspectorConverters.getConverter(arguments[0], PrimitiveObjectInspectorFactory.writableBooleanObjectInspector); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotFalse.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotFalse.java index 4bfb8983327b..14301e4f5a07 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotFalse.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotFalse.java @@ -39,6 +39,7 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen if (arguments.length != 1) { throw new UDFArgumentLengthException("Invalid number of arguments"); } + checkArgPrimitive(arguments, 0); conditionConverter = ObjectInspectorConverters.getConverter(arguments[0], PrimitiveObjectInspectorFactory.writableBooleanObjectInspector); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotTrue.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotTrue.java index 65b80b577ed3..a92f29e27d14 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotTrue.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotTrue.java @@ -39,6 +39,7 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen if (arguments.length != 1) { throw new UDFArgumentLengthException("Invalid number of arguments"); } + checkArgPrimitive(arguments, 0); conditionConverter = ObjectInspectorConverters.getConverter(arguments[0], PrimitiveObjectInspectorFactory.writableBooleanObjectInspector); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPTrue.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPTrue.java index 30d33fb8bbc3..208289013320 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPTrue.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPTrue.java @@ -39,6 +39,7 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen if (arguments.length != 1) { throw new UDFArgumentLengthException("Invalid number of arguments"); } + checkArgPrimitive(arguments, 0); conditionConverter = ObjectInspectorConverters.getConverter(arguments[0], PrimitiveObjectInspectorFactory.writableBooleanObjectInspector); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRegExp.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRegExp.java index 8522abd0ee68..63fbcc0e7af1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRegExp.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFRegExp.java @@ -64,7 +64,7 @@ public class GenericUDFRegExp extends GenericUDF { @Override public void configure(MapredContext context) { if (context != null) { - if(HiveConf.getBoolVar(context.getJobConf(), 
HiveConf.ConfVars.HIVEUSEGOOGLEREGEXENGINE)){ + if(HiveConf.getBoolVar(context.getJobConf(), HiveConf.ConfVars.HIVE_USE_GOOGLE_REGEX_ENGINE)){ this.useGoogleRegexEngine=true; } } @@ -75,7 +75,7 @@ public void configure(MapredContext context) { public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { SessionState ss = SessionState.get(); if (ss != null) { - this.useGoogleRegexEngine = ss.getConf().getBoolVar(HiveConf.ConfVars.HIVEUSEGOOGLEREGEXENGINE); + this.useGoogleRegexEngine = ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_USE_GOOGLE_REGEX_ENGINE); } checkArgsSize(arguments, 2, 2); @@ -93,7 +93,7 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen regexConst = getConstantStringValue(arguments, 1); if (regexConst != null) { if(!useGoogleRegexEngine){ - //if(!HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVEUSEGOOGLEREGEXENGINE)){ + //if(!HiveConf.getVar(hiveConf, HiveConf.ConfVars.HIVE_USE_GOOGLE_REGEX_ENGINE)){ patternConst = Pattern.compile(regexConst); }else{ patternConstR2j = com.google.re2j.Pattern.compile(regexConst); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSQCountCheck.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSQCountCheck.java index b965410f9459..421c3fce70ad 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSQCountCheck.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSQCountCheck.java @@ -48,6 +48,7 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen "Invalid scalar subquery expression. Subquery count check expected two argument but received: " + arguments.length); } + checkArgPrimitive(arguments, 0); converters[0] = ObjectInspectorConverters.getConverter(arguments[0], PrimitiveObjectInspectorFactory.writableLongObjectInspector); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSplit.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSplit.java index f95e75b04e05..e091965ce4af 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSplit.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSplit.java @@ -53,6 +53,7 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen converters = new ObjectInspectorConverters.Converter[arguments.length]; for (int i = 0; i < arguments.length; i++) { + checkArgPrimitive(arguments, i); converters[i] = ObjectInspectorConverters.getConverter(arguments[i], PrimitiveObjectInspectorFactory.writableStringObjectInspector); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToArray.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToArray.java new file mode 100644 index 000000000000..915188a363a5 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToArray.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.udf.generic; + +import org.apache.hadoop.hive.ql.exec.Description; +import org.apache.hadoop.hive.ql.exec.UDFArgumentException; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.udf.SettableUDF; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; + +@Description(name = "toArray", value = "_FUNC_(x) - converts it's parameter to _FUNC_" + + "Currently only null literal is supported.") +public class GenericUDFToArray extends GenericUDF implements SettableUDF { + private ListTypeInfo typeInfo; + + @Override + public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { + return TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(typeInfo); + } + + @Override + public Object evaluate(DeferredObject[] arguments) throws HiveException { + return null; + } + + @Override + public String getDisplayString(String[] children) { + return String.format("toArray(%s)", String.join(",", children)); + } + + @Override + public void setTypeInfo(TypeInfo typeInfo) throws UDFArgumentException { + this.typeInfo = (ListTypeInfo) typeInfo; + } + + @Override + public TypeInfo getTypeInfo() { + return typeInfo; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToMap.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToMap.java new file mode 100644 index 000000000000..85d534ccbbff --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToMap.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.udf.generic; + +import org.apache.hadoop.hive.ql.exec.Description; +import org.apache.hadoop.hive.ql.exec.UDFArgumentException; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.udf.SettableUDF; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; + +/** + * GenericUDFMap. 
+ * + */ +@Description(name = "toMap", value = "_FUNC_(x) - converts it's parameter to _FUNC_" + + "Currently only null literal is supported.") +public class GenericUDFToMap extends GenericUDF implements SettableUDF { + private MapTypeInfo typeInfo; + + @Override + public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { + return TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(typeInfo); + } + + @Override + public Object evaluate(DeferredObject[] arguments) throws HiveException { + return null; + } + + @Override + public String getDisplayString(String[] children) { + return String.format("toMap(%s)", String.join(",", children)); + } + + @Override + public void setTypeInfo(TypeInfo typeInfo) throws UDFArgumentException { + this.typeInfo = (MapTypeInfo) typeInfo; + } + + @Override + public TypeInfo getTypeInfo() { + return typeInfo; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToStruct.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToStruct.java new file mode 100644 index 000000000000..f59d11ce7730 --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFToStruct.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.udf.generic; + +import org.apache.hadoop.hive.ql.exec.Description; +import org.apache.hadoop.hive.ql.exec.UDFArgumentException; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.udf.SettableUDF; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; + +@Description(name = "toStruct", value = "_FUNC_(x) - converts it's parameter to _FUNC_" + + "Currently only null literal is supported.") +public class GenericUDFToStruct extends GenericUDF implements SettableUDF { + private StructTypeInfo typeInfo; + + @Override + public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { + return TypeInfoUtils.getStandardWritableObjectInspectorFromTypeInfo(typeInfo); + } + + @Override + public Object evaluate(DeferredObject[] arguments) throws HiveException { + return null; + } + + @Override + public String getDisplayString(String[] children) { + return String.format("toStruct(%s)", String.join(",", children)); } + + @Override + public void setTypeInfo(TypeInfo typeInfo) throws UDFArgumentException { + this.typeInfo = (StructTypeInfo) typeInfo; + } + + @Override + public TypeInfo getTypeInfo() { + return typeInfo; + } +} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTrunc.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTrunc.java index 3cebbe7af853..ffb553f78c53 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTrunc.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTrunc.java @@ -106,6 +106,9 @@ public class GenericUDFTrunc extends GenericUDF { @Override public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { + for (int i = 0; i < arguments.length; i++) { + checkArgPrimitive(arguments, i); + } if (arguments.length == 2) { inputType1 = ((PrimitiveObjectInspector) arguments[0]).getPrimitiveCategory(); inputType2 = ((PrimitiveObjectInspector) arguments[1]).getPrimitiveCategory(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTumbledWindow.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTumbledWindow.java index 17823c01ee55..7c6c3b24e970 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTumbledWindow.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTumbledWindow.java @@ -71,6 +71,9 @@ public class GenericUDFTumbledWindow extends GenericUDF { */ @Override public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException { checkArgsSize(arguments, 2, 3); + for (int i = 0; i < arguments.length; i++) { + checkArgPrimitive(arguments, i); + } //arg 1 has to be of timestamp type //arg 2 has to be an interval //arg 3 has to be absent or timestamp type diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java index 97d04676e76b..b1c3b767deca 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java @@ -238,7 +238,7 @@ protected SplitResult getSplitResult(boolean generateLightWeightSplits) + ConfVars.LLAP_HS2_ENABLE_COORDINATOR.varname + " enabled"); } 
ApplicationId extClientAppId = coordinator.createExtClientAppId(); - String externalDagName = SessionState.get().getConf().getVar(ConfVars.HIVEQUERYNAME); + String externalDagName = SessionState.get().getConf().getVar(ConfVars.HIVE_QUERY_NAME); StringBuilder sb = new StringBuilder(); sb.append("Generated appID ").append(extClientAppId.toString()).append(" for LLAP splits"); @@ -271,8 +271,8 @@ private PlanFragment createPlanFragment(String query, ApplicationId splitsAppId) throws HiveException { HiveConf conf = new HiveConf(SessionState.get().getConf()); - HiveConf.setVar(conf, ConfVars.HIVEFETCHTASKCONVERSION, "none"); - HiveConf.setVar(conf, ConfVars.HIVEQUERYRESULTFILEFORMAT, PlanUtils.LLAP_OUTPUT_FORMAT_KEY); + HiveConf.setVar(conf, ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); + HiveConf.setVar(conf, ConfVars.HIVE_QUERY_RESULT_FILEFORMAT, PlanUtils.LLAP_OUTPUT_FORMAT_KEY); String originalMode = HiveConf.getVar(conf, ConfVars.HIVE_EXECUTION_MODE); @@ -451,7 +451,7 @@ private SplitResult getSplits(JobConf job, TezWork work, Schema schema, Applicat // Update the queryId to use the generated extClientAppId. See comment below about // why this is done. - HiveConf.setVar(wxConf, HiveConf.ConfVars.HIVEQUERYID, extClientAppId.toString()); + HiveConf.setVar(wxConf, HiveConf.ConfVars.HIVE_QUERY_ID, extClientAppId.toString()); Vertex wx = utils.createVertex(wxConf, mapWork, scratchDir, work, DagUtils.createTezLrMap(appJarLr, null)); String vertexName = wx.getName(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFStack.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFStack.java index f3cc2b4f6389..d75d3b627144 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFStack.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFStack.java @@ -23,13 +23,13 @@ import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; +import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFUtils.ReturnObjectInspectorResolver; import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; -import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantIntObjectInspector; import org.apache.hadoop.io.IntWritable; /** @@ -63,13 +63,21 @@ public StructObjectInspector initialize(ObjectInspector[] args) } if (!(args[0] instanceof ConstantObjectInspector)) { throw new UDFArgumentException( + "The first argument to STACK() must be a constant."); + } + final Object value = ((ConstantObjectInspector) args[0]).getWritableConstantValue(); + if (value == null) { + throw new UDFArgumentException("The first argument of STACK() must not be null."); + } + if (!(value instanceof IntWritable)) { + throw new UDFArgumentTypeException( + 0, "The first argument to STACK() must be a constant integer (got " + args[0].getTypeName() + " instead)."); } - numRows = (IntWritable) - ((ConstantObjectInspector)args[0]).getWritableConstantValue(); + numRows = (IntWritable) value; - if (numRows == null || numRows.get() < 1) { + if (numRows.get() < 1) { throw new UDFArgumentException( "STACK() expects its first argument to be >= 1."); } @@ -109,7 +117,7 @@ 
public StructObjectInspector initialize(ObjectInspector[] args) @Override public void process(Object[] args) - throws HiveException, UDFArgumentException { + throws HiveException { for (int ii = 0; ii < numRows.get(); ++ii) { for (int jj = 0; jj < numCols; ++jj) { int index = ii * numCols + jj + 1; @@ -117,7 +125,7 @@ public void process(Object[] args) forwardObj[jj] = returnOIResolvers.get(jj).convertIfNecessary(args[index], argOIs.get(index)); } else { - forwardObj[ii] = null; + forwardObj[jj] = null; } } forward(forwardObj); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java index 8bf51e5b8c3c..fb342e27153d 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/ptf/WindowingTableFunction.java @@ -213,7 +213,7 @@ private boolean streamingPossible(Configuration cfg, WindowFunctionDef wFnDef) return true; } - int windowLimit = HiveConf.getIntVar(cfg, ConfVars.HIVEJOINCACHESIZE); + int windowLimit = HiveConf.getIntVar(cfg, ConfVars.HIVE_JOIN_CACHE_SIZE); if (windowLimit < (start.getAmt() + end.getAmt() + 1)) { return false; @@ -271,7 +271,7 @@ private int[] setCanAcceptInputAsStream(Configuration cfg) throws HiveException return null; } - int windowLimit = HiveConf.getIntVar(cfg, ConfVars.HIVEJOINCACHESIZE); + int windowLimit = HiveConf.getIntVar(cfg, ConfVars.HIVE_JOIN_CACHE_SIZE); if (windowLimit < (endPos - startPos + 1)) { return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java index 1dd9d8bf9db9..1d4b8fa22d03 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/util/HiveStrictManagedMigration.java @@ -574,7 +574,7 @@ private void readControlConfigs(FileSystem fs, Path path) { }); if (runOptions.shouldModifyManagedTableLocation || runOptions.shouldMoveExternal) { Configuration oldConf = new Configuration(conf); - HiveConf.setVar(oldConf, HiveConf.ConfVars.METASTOREWAREHOUSE, runOptions.oldWarehouseRoot); + HiveConf.setVar(oldConf, HiveConf.ConfVars.METASTORE_WAREHOUSE, runOptions.oldWarehouseRoot); oldWh = ThreadLocal.withInitial(() -> { try { @@ -650,7 +650,7 @@ static WarehouseRootCheckResult checkOldWarehouseRoot(RunOptions runOptions, Hiv shouldMoveExternal = false; } else { String currentPathString = shouldModifyManagedTableLocation ? - HiveConf.getVar(conf, HiveConf.ConfVars.METASTOREWAREHOUSE) : + HiveConf.getVar(conf, HiveConf.ConfVars.METASTORE_WAREHOUSE) : HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL); if (arePathsEqual(conf, runOptions.oldWarehouseRoot, currentPathString)) { LOG.info("oldWarehouseRoot is the same as the target path {}." diff --git a/ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java b/ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java index 67df6a7bcec4..8871bb54e349 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/util/UpgradeTool.java @@ -175,7 +175,7 @@ private static IMetaStoreClient getHMS(HiveConf conf) { return RetryingMetaStoreClient.getProxy(conf, true); } catch (MetaException e) { throw new RuntimeException("Error connecting to Hive Metastore URI: " - + conf.getVar(HiveConf.ConfVars.METASTOREURIS) + ". 
" + e.getMessage(), e); + + conf.getVar(HiveConf.ConfVars.METASTORE_URIS) + ". " + e.getMessage(), e); } } /** diff --git a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java index 38c44f7722fe..d26f3774af70 100644 --- a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java +++ b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestCompactionTxnHandler.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil; import org.apache.hive.common.util.HiveVersionInfo; import org.junit.After; @@ -463,7 +464,7 @@ public void testMarkFailed() throws Exception { txnHandler.markFailed(ci); fail("The first call to markFailed() must have failed as this call did " + "not throw the expected exception"); - } catch (MetaException e) { + } catch (IllegalStateException e) { // This is expected assertTrue(e.getMessage().contains("No record with CQ_ID=")); } @@ -753,7 +754,7 @@ public void testFindPotentialCompactions() throws Exception { LockResponse res = txnHandler.lock(req); assertTrue(res.getState() == LockState.ACQUIRED); txnHandler.commitTxn(new CommitTxnRequest(txnid)); - assertEquals(0, txnHandler.numLocksInLockTable()); + assertEquals(0, txnHandler.getNumLocks()); Set potentials = txnHandler.findPotentialCompactions(100, -1L); assertEquals(2, potentials.size()); @@ -1014,7 +1015,7 @@ public void testFindPotentialCompactions_limitFetchSize() throws Exception { LockResponse res = txnHandler.lock(req); assertSame(res.getState(), LockState.ACQUIRED); txnHandler.commitTxn(new CommitTxnRequest(txnId)); - assertEquals(0, txnHandler.numLocksInLockTable()); + assertEquals(0, txnHandler.getNumLocks()); Set potentials = txnHandler.findPotentialCompactions(100, -1L); assertEquals(1, potentials.size()); diff --git a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java index ee810c2117e5..8de9cbe93e10 100644 --- a/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java +++ b/ql/src/test/org/apache/hadoop/hive/metastore/txn/TestTxnHandler.java @@ -95,7 +95,6 @@ import java.util.List; import java.util.concurrent.CompletableFuture; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import java.util.stream.LongStream; @@ -107,7 +106,6 @@ import static junit.framework.Assert.fail; import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.TABLE_IS_TRANSACTIONAL; import static org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES; -import static org.apache.hadoop.hive.metastore.utils.LockTypeUtil.getEncoding; /** * Tests for TxnHandler. 
@@ -1033,7 +1031,7 @@ public void testMultipleLock() throws Exception { res = txnHandler.checkLock(new CheckLockRequest(lockid)); assertTrue(res.getState() == LockState.ACQUIRED); txnHandler.unlock(new UnlockRequest(lockid)); - assertEquals(0, txnHandler.numLocksInLockTable()); + assertEquals(0, txnHandler.getNumLocks()); } @Test @@ -1088,7 +1086,7 @@ public void testUnlockOnCommit() throws Exception { LockResponse res = txnHandler.lock(req); assertTrue(res.getState() == LockState.ACQUIRED); txnHandler.commitTxn(new CommitTxnRequest(txnid)); - assertEquals(0, txnHandler.numLocksInLockTable()); + assertEquals(0, txnHandler.getNumLocks()); } @Test @@ -1104,7 +1102,7 @@ public void testUnlockOnAbort() throws Exception { LockResponse res = txnHandler.lock(req); assertTrue(res.getState() == LockState.ACQUIRED); txnHandler.abortTxn(new AbortTxnRequest(txnid)); - assertEquals(0, txnHandler.numLocksInLockTable()); + assertEquals(0, txnHandler.getNumLocks()); } @Test @@ -1462,113 +1460,6 @@ public void showLocks() throws Exception { for (int i = 0; i < saw.length; i++) assertTrue("Didn't see lock id " + i, saw[i]); } - @Test - @Ignore("Wedges Derby") - public void deadlockDetected() throws Exception { - LOG.debug("Starting deadlock test"); - - if (txnHandler instanceof TxnHandler) { - final TxnHandler tHndlr = (TxnHandler)txnHandler; - Connection conn = tHndlr.getDbConn(Connection.TRANSACTION_SERIALIZABLE); - try { - Statement stmt = conn.createStatement(); - long now = tHndlr.getDbTime(conn); - stmt.executeUpdate("INSERT INTO \"TXNS\" (\"TXN_ID\", \"TXN_STATE\", \"TXN_STARTED\", \"TXN_LAST_HEARTBEAT\", " + - "txn_user, txn_host) values (1, 'o', " + now + ", " + now + ", 'shagy', " + - "'scooby.com')"); - stmt.executeUpdate("INSERT INTO \"HIVE_LOCKS\" (\"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\", \"HL_TXNID\", " + - "\"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", \"HL_LOCK_STATE\", \"HL_LOCK_TYPE\", \"HL_LAST_HEARTBEAT\", " + - "\"HL_USER\", \"HL_HOST\") VALUES (1, 1, 1, 'MYDB', 'MYTABLE', 'MYPARTITION', '" + - tHndlr.LOCK_WAITING + "', '" + getEncoding(LockType.EXCLUSIVE) + "', " + now + ", 'fred', " + - "'scooby.com')"); - conn.commit(); - } finally { - tHndlr.closeDbConn(conn); - } - - final AtomicBoolean sawDeadlock = new AtomicBoolean(); - - final Connection conn1 = tHndlr.getDbConn(Connection.TRANSACTION_SERIALIZABLE); - final Connection conn2 = tHndlr.getDbConn(Connection.TRANSACTION_SERIALIZABLE); - try { - - for (int i = 0; i < 5; i++) { - Thread t1 = new Thread() { - @Override - public void run() { - try { - try { - updateTxns(conn1); - updateLocks(conn1); - Thread.sleep(1000); - conn1.commit(); - LOG.debug("no exception, no deadlock"); - } catch (SQLException e) { - try { - tHndlr.checkRetryable(e, "thread t1"); - LOG.debug("Got an exception, but not a deadlock, SQLState is " + - e.getSQLState() + " class of exception is " + e.getClass().getName() + - " msg is <" + e.getMessage() + ">"); - } catch (TxnHandler.RetryException de) { - LOG.debug("Forced a deadlock, SQLState is " + e.getSQLState() + " class of " + - "exception is " + e.getClass().getName() + " msg is <" + e - .getMessage() + ">"); - sawDeadlock.set(true); - } - } - conn1.rollback(); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - }; - - Thread t2 = new Thread() { - @Override - public void run() { - try { - try { - updateLocks(conn2); - updateTxns(conn2); - Thread.sleep(1000); - conn2.commit(); - LOG.debug("no exception, no deadlock"); - } catch (SQLException e) { - try { - tHndlr.checkRetryable(e, "thread t2"); - 
LOG.debug("Got an exception, but not a deadlock, SQLState is " + - e.getSQLState() + " class of exception is " + e.getClass().getName() + - " msg is <" + e.getMessage() + ">"); - } catch (TxnHandler.RetryException de) { - LOG.debug("Forced a deadlock, SQLState is " + e.getSQLState() + " class of " + - "exception is " + e.getClass().getName() + " msg is <" + e - .getMessage() + ">"); - sawDeadlock.set(true); - } - } - conn2.rollback(); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - }; - - t1.start(); - t2.start(); - t1.join(); - t2.join(); - if (sawDeadlock.get()) break; - } - assertTrue(sawDeadlock.get()); - } finally { - conn1.rollback(); - tHndlr.closeDbConn(conn1); - conn2.rollback(); - tHndlr.closeDbConn(conn2); - } - } - } - /** * This cannnot be run against Derby (thus in UT) but it can run against MySQL. * 1. add to metastore/pom.xml @@ -1578,9 +1469,9 @@ public void run() { * 5.1.30 * * 2. Hack in the c'tor of this class - * conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, "jdbc:mysql://localhost/metastore"); + * conf.setVar(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY, "jdbc:mysql://localhost/metastore"); * conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "hive"); - * conf.setVar(HiveConf.ConfVars.METASTOREPWD, "hive"); + * conf.setVar(HiveConf.ConfVars.METASTORE_PWD, "hive"); * conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver"); * 3. Remove TxnDbUtil.prepDb(); in TxnHandler.checkQFileTestHack() * @@ -1662,20 +1553,6 @@ public void uncaughtException(Thread t, Throwable e) { } } - @Test - public void testRetryableRegex() throws Exception { - SQLException sqlException = new SQLException("ORA-08177: can't serialize access for this transaction", "72000"); - // Note that we have 3 regex'es below - conf.setVar(HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX, "^Deadlock detected, roll back,.*08177.*,.*08178.*"); - boolean result = TxnHandler.isRetryable(conf, sqlException); - Assert.assertTrue("regex should be retryable", result); - - sqlException = new SQLException("This error message, has comma in it"); - conf.setVar(HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX, ".*comma.*"); - result = TxnHandler.isRetryable(conf, sqlException); - Assert.assertTrue("regex should be retryable", result); - } - private List replOpenTxnForTest(long startId, int numTxn, String replPolicy) throws Exception { conf.setIntVar(HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH, numTxn); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java index 1e2fe3303afe..39e09a8eb178 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java @@ -65,9 +65,8 @@ import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; import org.apache.hadoop.hive.metastore.api.TxnInfo; import org.apache.hadoop.hive.metastore.api.TxnState; -import org.apache.hadoop.hive.metastore.api.AbortCompactResponse; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService; +import org.apache.hadoop.hive.metastore.txn.service.AcidHouseKeeperService; import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; @@ -123,7 +122,7 @@ void initHiveConf() { //TestTxnCommandsWithSplitUpdateAndVectorization has the vectorized version //of these tests. 
HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false); - HiveConf.setVar(hiveConf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + HiveConf.setVar(hiveConf, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_ACID_DROP_PARTITION_USE_BASE, false); HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_ACID_RENAME_PARTITION_MAKE_COPY, false); HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_ACID_CREATE_TABLE_USE_SUFFIX, false); @@ -163,9 +162,9 @@ public void testInsertOverwrite() throws Exception { List rs = runStatementOnDriver("select a from " + Table.ACIDTBL + " where b = 2"); Assert.assertEquals(1, rs.size()); Assert.assertEquals("1", rs.get(0)); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert overwrite table " + Table.ACIDTBL + " values(3,2)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); runStatementOnDriver("insert into " + Table.ACIDTBL + " values(5,6)"); rs = runStatementOnDriver("select a from " + Table.ACIDTBL + " order by a"); Assert.assertEquals(2, rs.size()); @@ -1463,9 +1462,9 @@ public void testMoreBucketsThanReducers() throws Exception { // todo: try using set VerifyNumReducersHook.num.reducers=10; d.destroy(); HiveConf hc = new HiveConf(hiveConf); - hc.setIntVar(HiveConf.ConfVars.MAXREDUCERS, 1); + hc.setIntVar(HiveConf.ConfVars.MAX_REDUCERS, 1); //this is used in multiple places, SemanticAnalyzer.getBucketingSortingDest() among others - hc.setIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS, 1); + hc.setIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS, 1); hc.setBoolVar(HiveConf.ConfVars.HIVE_EXPLAIN_USER, false); d = new Driver(hc); d.setMaxRows(10000); @@ -1483,9 +1482,9 @@ public void testMoreBucketsThanReducers2() throws Exception { //see bucket_num_reducers.q bucket_num_reducers2.q d.destroy(); HiveConf hc = new HiveConf(hiveConf); - hc.setIntVar(HiveConf.ConfVars.MAXREDUCERS, 2); + hc.setIntVar(HiveConf.ConfVars.MAX_REDUCERS, 2); //this is used in multiple places, SemanticAnalyzer.getBucketingSortingDest() among others - hc.setIntVar(HiveConf.ConfVars.HADOOPNUMREDUCERS, 2); + hc.setIntVar(HiveConf.ConfVars.HADOOP_NUM_REDUCERS, 2); d = new Driver(hc); d.setMaxRows(10000); runStatementOnDriver("create table fourbuckets (a int, b int) clustered by (a) into 4 buckets stored as orc TBLPROPERTIES ('transactional'='true')"); @@ -2589,15 +2588,15 @@ public void testFetchTaskCachingWithConversion() throws Exception { runStatementOnDriver("insert into table fetch_task_table values (1,2), (3,4), (5,6)"); List expectedRes = runStatementOnDriver("select * from fetch_task_table"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEFETCHTASKCACHING, true); - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CACHING, true); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); d.run("select * from fetch_task_table"); Assert.assertFalse(d.getFetchTask().isCachingEnabled()); d.getFetchTask().fetch(actualRes); Assert.assertEquals(actualRes, expectedRes); actualRes.clear(); - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "more"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "more"); d.run("select * from fetch_task_table"); 
Assert.assertTrue(d.getFetchTask().isCachingEnabled()); d.getFetchTask().fetch(actualRes); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java index b96ec876db20..3f574e384ede 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java @@ -63,7 +63,8 @@ import org.apache.hadoop.hive.metastore.api.AbortCompactionRequest; import org.apache.hadoop.hive.metastore.api.AbortCompactionResponseElement; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService; +import org.apache.hadoop.hive.metastore.txn.service.CompactionHouseKeeperService; +import org.apache.hadoop.hive.metastore.txn.service.AcidHouseKeeperService; import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.ql.ddl.DDLTask; @@ -74,7 +75,7 @@ import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager; import org.apache.hadoop.hive.ql.lockmgr.TxnManagerFactory; import org.apache.hadoop.hive.ql.processors.CommandProcessorException; -import org.apache.hadoop.hive.metastore.txn.AcidOpenTxnsCounterService; +import org.apache.hadoop.hive.metastore.txn.service.AcidOpenTxnsCounterService; import org.apache.hadoop.hive.ql.scheduled.ScheduledQueryExecutionContext; import org.apache.hadoop.hive.ql.scheduled.ScheduledQueryExecutionService; import org.apache.hadoop.hive.ql.schq.MockScheduledQueryService; @@ -159,7 +160,7 @@ void initHiveConf() { HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, false); //TestTxnCommands2WithAbortCleanupUsingCompactionCycle has the tests with abort cleanup in compaction cycle MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_CLEAN_ABORTS_USING_CLEANER, true); - HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, false); + HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES, false); HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_ACID_TRUNCATE_USE_BASE, false); } @@ -204,8 +205,8 @@ public void testOrcNoPPD() throws Exception { * @throws Exception */ private void testOrcPPD(boolean enablePPD) throws Exception { - boolean originalPpd = hiveConf.getBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER, enablePPD);//enables ORC PPD + boolean originalPpd = hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_OPT_INDEX_FILTER); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPT_INDEX_FILTER, enablePPD);//enables ORC PPD //create delta_0001_0001_0000 (should push predicate here) runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(new int[][]{{1, 2}, {3, 4}})); List explain; @@ -263,7 +264,7 @@ private void testOrcPPD(boolean enablePPD) throws Exception { List rs1 = runStatementOnDriver(query); int [][] resultData = new int[][] {{3, 5}, {5, 6}, {9, 10}}; Assert.assertEquals("Update failed", stringifyValues(resultData), rs1); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER, originalPpd); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPT_INDEX_FILTER, originalPpd); } static void assertExplainHasString(String string, List queryPlan, String errMsg) { @@ -1076,15 +1077,15 @@ public void testValidTxnsBookkeeping() throws Exception { @Test public void testSimpleRead() throws Exception { - 
hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "more"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "more"); int[][] tableData = {{1,2},{3,3}}; runStatementOnDriver("insert into " + Table.ACIDTBL + " " + makeValuesClause(tableData)); int[][] tableData2 = {{5,3}}; //this will cause next txn to be marked aborted but the data is still written to disk - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + " " + makeValuesClause(tableData2)); assert hiveConf.get(ValidTxnWriteIdList.VALID_TABLES_WRITEIDS_KEY) == null : "previous txn should've cleaned it"; - //so now if HIVEFETCHTASKCONVERSION were to use a stale value, it would use a + //so now if HIVE_FETCH_TASK_CONVERSION were to use a stale value, it would use a //ValidWriteIdList with HWM=MAX_LONG, i.e. include the data for aborted txn List rs = runStatementOnDriver("select * from " + Table.ACIDTBL); Assert.assertEquals("Extra data", 2, rs.size()); @@ -1225,7 +1226,7 @@ void testInitiatorWithMultipleFailedCompactionsForVariousTblProperties(String tb //generate enough delta files so that Initiator can trigger auto compaction runStatementOnDriver("insert into " + tblName + " values(" + (i + 1) + ", 'foo'),(" + (i + 2) + ", 'bar'),(" + (i + 3) + ", 'baz')"); } - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, true); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_INITIATOR_ON, true); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_CLEANER_ON, true); @@ -1244,9 +1245,14 @@ void testInitiatorWithMultipleFailedCompactionsForVariousTblProperties(String tb MetastoreConf.setTimeVar(hiveConf, MetastoreConf.ConfVars.ACID_HOUSEKEEPER_SERVICE_INTERVAL, 10, TimeUnit.MILLISECONDS); + MetastoreConf.setTimeVar(hiveConf, MetastoreConf.ConfVars.COMPACTION_HOUSEKEEPER_SERVICE_INTERVAL, 10, + TimeUnit.MILLISECONDS); MetastoreTaskThread houseKeeper = new AcidHouseKeeperService(); + MetastoreTaskThread compactionHouseKeeper = new CompactionHouseKeeperService(); houseKeeper.setConf(hiveConf); + compactionHouseKeeper.setConf(hiveConf); houseKeeper.run(); + compactionHouseKeeper.run(); checkCompactionState(new CompactionsByState(numDidNotInitiateCompactions,numFailedCompactions,0,0,0,0,numFailedCompactions + numDidNotInitiateCompactions), countCompacts(txnHandler)); txnHandler.compact(new CompactionRequest("default", tblName, CompactionType.MAJOR)); @@ -1260,6 +1266,7 @@ void testInitiatorWithMultipleFailedCompactionsForVariousTblProperties(String tb checkCompactionState(new CompactionsByState(numDidNotInitiateCompactions,numFailedCompactions + 2,0,0,0,0,numFailedCompactions + 2 + numDidNotInitiateCompactions), countCompacts(txnHandler)); houseKeeper.run(); + compactionHouseKeeper.run(); //COMPACTOR_HISTORY_RETENTION_FAILED failed compacts left (and no other since we only have failed ones here) checkCompactionState(new CompactionsByState( MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_DID_NOT_INITIATE), @@ -1267,7 +1274,7 @@ void testInitiatorWithMultipleFailedCompactionsForVariousTblProperties(String tb MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED) + MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_DID_NOT_INITIATE)), 
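    // Illustrative sketch, not a hunk of this patch: the paired housekeeping pattern the updated
    // test uses. The assumption here is that compaction-history cleanup moved out of
    // AcidHouseKeeperService into the new CompactionHouseKeeperService, so both task threads must
    // be configured and run for the retention assertions above to hold ("hiveConf" is the test's
    // configuration).
    MetastoreTaskThread houseKeeper = new AcidHouseKeeperService();
    MetastoreTaskThread compactionHouseKeeper = new CompactionHouseKeeperService();
    houseKeeper.setConf(hiveConf);
    compactionHouseKeeper.setConf(hiveConf);
    houseKeeper.run();
    compactionHouseKeeper.run();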
countCompacts(txnHandler)); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, false); txnHandler.compact(new CompactionRequest("default", tblName, CompactionType.MINOR)); //at this point "show compactions" should have (COMPACTOR_HISTORY_RETENTION_FAILED) failed + 1 initiated (explicitly by user) checkCompactionState(new CompactionsByState( @@ -1287,6 +1294,7 @@ void testInitiatorWithMultipleFailedCompactionsForVariousTblProperties(String tb runCleaner(hiveConf); // transition to Success state houseKeeper.run(); + compactionHouseKeeper.run(); checkCompactionState(new CompactionsByState( MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_DID_NOT_INITIATE), MetastoreConf.getIntVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED), 0, 0, 1, 0, @@ -1563,9 +1571,9 @@ private void writeBetweenWorkerAndCleanerForVariousTblProperties(String tblPrope runWorker(hiveConf); //delete something, but make sure txn is rolled back - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("delete from " + tblName + " where a = 1"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); List expected = new ArrayList<>(); expected.add("1\tfoo"); @@ -1622,7 +1630,7 @@ private void writeBetweenWorkerAndCleanerForVariousTblProperties(String tblPrope public void testFailHeartbeater() throws Exception { // Fail heartbeater, so that we can get a RuntimeException from the query. // More specifically, it's the original IOException thrown by either MR's or Tez's progress monitoring loop. 
- hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILHEARTBEATER, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_HEARTBEATER, true); Exception exception = null; try { runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(new int[][]{{1, 2}, {3, 4}})); @@ -1630,7 +1638,7 @@ public void testFailHeartbeater() throws Exception { exception = e; } Assert.assertNotNull(exception); - Assert.assertTrue(exception.getMessage().contains("HIVETESTMODEFAILHEARTBEATER=true")); + Assert.assertTrue(exception.getMessage().contains("HIVE_TEST_MODE_FAIL_HEARTBEATER=true")); } @Test @@ -1692,9 +1700,9 @@ public void testCompactWithDelete() throws Exception { public void testNoHistory() throws Exception { int[][] tableData = {{1,2},{3,4}}; runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData)); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData)); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); runStatementOnDriver("alter table "+ Table.ACIDTBL + " compact 'MAJOR'"); runWorker(hiveConf); @@ -1764,7 +1772,7 @@ protected void testACIDwithSchemaEvolutionForVariousTblProperties(String tblProp } protected void createAbortLowWaterMark() throws Exception{ - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("select * from " + Table.ACIDTBL); // wait for metastore.txn.opentxn.timeout Thread.sleep(1000); @@ -1774,7 +1782,7 @@ protected void createAbortLowWaterMark() throws Exception{ @Test public void testETLSplitStrategyForACID() throws Exception { hiveConf.setVar(HiveConf.ConfVars.HIVE_ORC_SPLIT_STRATEGY, "ETL"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEOPTINDEXFILTER, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPT_INDEX_FILTER, true); runStatementOnDriver("insert into " + Table.ACIDTBL + " values(1,2)"); runStatementOnDriver("alter table " + Table.ACIDTBL + " compact 'MAJOR'"); runWorker(hiveConf); @@ -2437,9 +2445,9 @@ public void testCleanerForTxnToWriteId() throws Exception { int[][] tableData5 = {{5, 6}}; runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p=3) (a,b) " + makeValuesClause(tableData3)); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData4)); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // Keep an open txn which refers to the aborted txn. Context ctx = new Context(hiveConf); @@ -2448,9 +2456,9 @@ public void testCleanerForTxnToWriteId() throws Exception { txnMgr.getValidTxns(); // Start an INSERT statement transaction and roll back this transaction. 
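    // Illustrative sketch, not a hunk of this patch: the abort-injection idiom behind the renamed
    // HIVE_TEST_MODE_ROLLBACK_TXN flag, as used throughout the tests above. Only the statement
    // issued between the two set calls is rolled back ("hiveConf", "Table.ACIDTBL" and
    // "runStatementOnDriver" come from the surrounding test class).
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true);
    runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(3,4)");  // this txn is aborted
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false);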
- hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData5)); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) " + makeValuesClause(tableData5)); @@ -2505,9 +2513,9 @@ public void testMmTableAbortWithCompaction() throws Exception { Assert.assertEquals("1", r1.get(0)); // 2. Let a transaction be aborted - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.MMTBL + "(a,b) values(3,4)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // There should be 1 delta and 1 base directory. The base one is the aborted one. verifyDeltaDirAndResult(2, Table.MMTBL.toString(), "", resultData1); @@ -2541,9 +2549,9 @@ public void testMmTableAbortWithCompaction() throws Exception { // 7. add few more rows runStatementOnDriver("insert into " + Table.MMTBL + "(a,b) values(7,8)"); // 8. add one more aborted delta - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.MMTBL + "(a,b) values(9,10)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // 9. Perform a MAJOR compaction, expectation is it should remove aborted base dir runStatementOnDriver("alter table "+ Table.MMTBL + " compact 'MAJOR'"); @@ -2575,9 +2583,9 @@ public void testMmTableAbortWithCompactionNoCleanup() throws Exception { Assert.assertEquals("2", r1.get(0)); // 2. Let a transaction be aborted - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.MMTBL + " values(3,4)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // There should be 1 delta and 1 base directory. The base one is the aborted one. verifyDeltaDirAndResult(3, Table.MMTBL.toString(), "", resultData1); r1 = runStatementOnDriver("select count(*) from " + Table.MMTBL); @@ -2597,9 +2605,9 @@ public void testMmTableAbortWithCompactionNoCleanup() throws Exception { // 4. add few more rows runStatementOnDriver("insert into " + Table.MMTBL + "(a,b) values(7,8)"); // 5. add one more aborted delta - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.MMTBL + "(a,b) values(9,10)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); verifyDeltaDirAndResult(5, Table.MMTBL.toString(), "", resultData3); // 6. 
Perform a MAJOR compaction, expectation is it should remove aborted delta dir @@ -2629,9 +2637,9 @@ public void testDynPartInsertWithAborts() throws Exception { verifyDeltaDirAndResult(1, Table.ACIDTBLPART.toString(), "p=p1", resultData); // forcing a txn to abort before addDynamicPartitions - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, true); runStatementOnDriverWithAbort("insert into " + Table.ACIDTBLPART + " partition(p) values(3,3,'p1'),(4,4,'p1')"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, false); verifyDeltaDirAndResult(2, Table.ACIDTBLPART.toString(), "p=p1", resultData); int count = TestTxnDbUtil @@ -2666,10 +2674,10 @@ public void testDynPartInsertWithMultiPartitionAborts() throws Exception { Assert.assertEquals("4", r1.get(0)); // forcing a txn to abort before addDynamicPartitions - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, true); runStatementOnDriverWithAbort("insert into " + Table.ACIDTBLPART + " partition(p) values(3,3,'p1'),(4,4,'p1')"); runStatementOnDriverWithAbort("insert into " + Table.ACIDTBLPART + " partition(p) values(3,3,'p2'),(4,4,'p2')"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, false); verifyDeltaDirAndResult(2, Table.ACIDTBLPART.toString(), "p=p1", resultData); verifyDeltaDirAndResult(2, Table.ACIDTBLPART.toString(), "p=p2", resultData); r1 = runStatementOnDriver("select count(*) from " + Table.ACIDTBLPART); @@ -2709,9 +2717,9 @@ public void testDynPartIOWWithAborts() throws Exception { verifyDeltaDirAndResult(1, Table.ACIDTBLPART.toString(), "p=p1", resultData); // forcing a txn to abort before addDynamicPartitions - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, true); runStatementOnDriverWithAbort("insert overwrite table " + Table.ACIDTBLPART + " partition(p) values(3,3,'p1'),(4,4,'p1')"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, false); verifyDeltaDirAndResult(1, Table.ACIDTBLPART.toString(), "p=p1", resultData); verifyBaseDir(1, Table.ACIDTBLPART.toString(), "p=p1"); @@ -2748,10 +2756,10 @@ public void testDynPartIOWWithMultiPartitionAborts() throws Exception { Assert.assertEquals("4", r1.get(0)); // forcing a txn to abort before addDynamicPartitions - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, true); runStatementOnDriverWithAbort("insert overwrite table " + Table.ACIDTBLPART + " partition(p) values(3,3,'p1'),(4,4,'p1')"); runStatementOnDriverWithAbort("insert overwrite table " + Table.ACIDTBLPART + " partition(p) values(3,3,'p2'),(4,4,'p2')"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, false); verifyDeltaDirAndResult(1, Table.ACIDTBLPART.toString(), "p=p1", resultData); 
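    // Illustrative sketch, not a hunk of this patch: the dynamic-partition abort idiom used by the
    // tests above. The flag forces the transaction to abort before addDynamicPartitions, leaving
    // the written delta files on disk for the cleaner/compactor assertions that follow
    // ("runStatementOnDriverWithAbort" and "Table.ACIDTBLPART" come from the surrounding test class).
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, true);
    runStatementOnDriverWithAbort("insert into " + Table.ACIDTBLPART + " partition(p) values(3,3,'p1'),(4,4,'p1')");
    hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, false);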
verifyBaseDir(1, Table.ACIDTBLPART.toString(), "p=p1"); verifyDeltaDirAndResult(1, Table.ACIDTBLPART.toString(), "p=p2", resultData); @@ -2796,9 +2804,9 @@ public void testDynPartUpdateWithAborts() throws Exception { verifyDeltaDirAndResult(2, Table.ACIDTBLPART.toString(), "p=p1", resultData1); // forcing a txn to abort before addDynamicPartitions - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, true); runStatementOnDriverWithAbort("update " + Table.ACIDTBLPART + " set b=a+2 where a<5"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, false); verifyDeltaDirAndResult(2, Table.ACIDTBLPART.toString(), "p=p1", resultData1); verifyDeleteDeltaDir(1, Table.ACIDTBLPART.toString(), "p=p1"); @@ -2836,12 +2844,12 @@ public void testDynPartMergeWithAborts() throws Exception { runStatementOnDriver("insert into " + TestTxnCommands2.Table.NONACIDORCTBL + " " + makeValuesClause(sourceVals)); // forcing a txn to abort before addDynamicPartitions - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, true); runStatementOnDriverWithAbort("merge into " + Table.ACIDTBLPART + " using " + TestTxnCommands2.Table.NONACIDORCTBL + " as s ON " + Table.ACIDTBLPART + ".a = s.a " + "when matched then update set b = s.b " + "when not matched then insert values(s.a, s.b, 'newpart')"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILLOADDYNAMICPARTITION, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_LOAD_DYNAMIC_PARTITION, false); verifyDeltaDirAndResult(2, Table.ACIDTBLPART.toString(), "p=p1", resultData); verifyDeleteDeltaDir(1, Table.ACIDTBLPART.toString(), "p=p1"); verifyDeltaDirAndResult(1, Table.ACIDTBLPART.toString(), "p=newpart", resultData); @@ -2886,9 +2894,9 @@ public void testFullACIDAbortWithMinorMajorCompaction() throws Exception { Assert.assertEquals("1", r1.get(0)); // 2. Let a transaction be aborted - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(3,4)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // There should be 2 delta directories. verifyDeltaDirAndResult(2, Table.ACIDTBL.toString(), "", resultData1); @@ -2921,9 +2929,9 @@ public void testFullACIDAbortWithMinorMajorCompaction() throws Exception { runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(7,8)"); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(9,10)"); // 7. add one more aborted delta - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(11,12)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // 8. 
Perform a MAJOR compaction runStatementOnDriver("alter table "+ Table.ACIDTBL + " compact 'MAJOR'"); @@ -2953,9 +2961,9 @@ public void testFullACIDAbortWithMajorCompaction() throws Exception { Assert.assertEquals("2", r1.get(0)); // 2. Let a transaction be aborted - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(5,6)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // There should be 2 delta and 1 base directory. The base one is the aborted one. verifyDeltaDirAndResult(3, Table.ACIDTBL.toString(), "", resultData1); r1 = runStatementOnDriver("select count(*) from " + Table.ACIDTBL); @@ -2987,9 +2995,9 @@ public void testFullACIDAbortWithCompactionNoCleanup() throws Exception { verifyDeltaDirAndResult(2, Table.ACIDTBL.toString(), "", resultData1); // 2. abort one txns - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBL + "(a,b) values(5,6)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); verifyDeltaDirAndResult(3, Table.ACIDTBL.toString(), "", resultData1); // 3. Perform a MAJOR compaction. @@ -3019,14 +3027,14 @@ public void testFullACIDAbortWithManyPartitions() throws Exception { verifyDeltaDirAndResult(2, Table.ACIDTBLPART.toString(), "p=p3", resultData1); // 2. abort two txns in each partition - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p='p1') (a,b) values(5,6)"); runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p='p2') (a,b) values(5,6)"); runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p='p3') (a,b) values(5,6)"); runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p='p1') (a,b) values(5,6)"); runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p='p2') (a,b) values(5,6)"); runStatementOnDriver("insert into " + Table.ACIDTBLPART + " partition(p='p3') (a,b) values(5,6)"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); verifyDeltaDirAndResult(4, Table.ACIDTBLPART.toString(), "p=p1", resultData1); verifyDeltaDirAndResult(4, Table.ACIDTBLPART.toString(), "p=p2", resultData1); verifyDeltaDirAndResult(4, Table.ACIDTBLPART.toString(), "p=p3", resultData1); @@ -3346,8 +3354,11 @@ public void testNoTxnComponentsForScheduledQueries() throws Exception { // Run AcidHouseKeeperService to cleanup the COMPLETED_TXN_COMPONENTS. MetastoreTaskThread houseKeeper = new AcidHouseKeeperService(); + MetastoreTaskThread compactionHouseKeeper = new CompactionHouseKeeperService(); houseKeeper.setConf(hiveConf); + compactionHouseKeeper.setConf(hiveConf); houseKeeper.run(); + compactionHouseKeeper.run(); // Check whether the table is compacted. 
fileStatuses = fs.globStatus(new Path(getWarehouseDir() + "/" + tableName + "/*")); @@ -3398,7 +3409,7 @@ public void testCompactionOutputDirectoryNamesOnPartitionsAndOldDeltasDeleted() public void testShowCompactionOrder() throws Exception { d.destroy(); - hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + hiveConf.setVar(HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); d = new Driver(hiveConf); //generate some compaction history runStatementOnDriver("drop database if exists mydb1 cascade"); @@ -3434,7 +3445,7 @@ public void testShowCompactionOrder() throws Exception { runStatementOnDriver("insert into T values(1,4)");//makes delta_2_2 in T2 //create failed compaction attempt so that compactor txn is aborted - HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true); + HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, true); runStatementOnDriver("alter table T compact 'minor'"); TestTxnCommands2.runWorker(hiveConf); // Verify compaction order @@ -3454,7 +3465,7 @@ public void testShowCompactionOrder() throws Exception { public void testAbortCompaction() throws Exception { d.destroy(); - hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + hiveConf.setVar(HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); d = new Driver(hiveConf); //generate some compaction history runStatementOnDriver("drop database if exists mydb1 cascade"); @@ -3491,7 +3502,7 @@ public void testAbortCompaction() throws Exception { runStatementOnDriver("insert into myT1 values(1,4)");//makes delta_2_2 in T2 //create failed compaction attempt so that compactor txn is aborted - HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true); + HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, true); runStatementOnDriver("alter table myT1 compact 'minor'"); TestTxnCommands2.runWorker(hiveConf); // Verify compaction order diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands3.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands3.java index 741078028918..b3fe87a8be4d 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands3.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands3.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; @@ -426,7 +426,7 @@ public void testCompactionAbort() throws Exception { runStatementOnDriver("insert into T values(1,4)");//makes delta_2_2 in T2 //create failed compaction attempt so that compactor txn is aborted - HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true); + HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, true); runStatementOnDriver("alter table T compact 'minor'"); runWorker(hiveConf); @@ -456,7 +456,7 @@ public void testCompactionAbort() throws Exception { Assert.assertEquals(1, TestTxnDbUtil.countQueryAgent(hiveConf, "select count(*) from TXN_COMPONENTS where TC_WRITEID=" + highestCompactWriteId)); //now make a successful compactor run so that next Cleaner run actually 
cleans - HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, false); + HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, false); runStatementOnDriver("alter table T compact 'minor'"); runWorker(hiveConf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java index ba2b75cd9b07..5b243d2022b1 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java @@ -28,7 +28,6 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.FileUtils; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.junit.Assert; @@ -88,9 +87,9 @@ public void setUp() throws Exception { } void setUpInternalExtended(boolean isOrcFormat) throws Exception { - hiveConf.setBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING, true); - hiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + hiveConf.setBoolVar(HiveConf.ConfVars.DYNAMIC_PARTITIONING, true); + hiveConf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "true"); hiveConf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnExIm.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnExIm.java index 95cf36f6e3ba..9b7fab9ac704 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnExIm.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnExIm.java @@ -584,9 +584,9 @@ public void testMMExportAborted() throws Exception { runStatementOnDriver("create table T (a int, b int)"); runStatementOnDriver("create table Tstage (a int, b int)"); - HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); runStatementOnDriver("insert into Tstage" + TestTxnCommands2.makeValuesClause(dataAbort)); - HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); runStatementOnDriver("insert into Tstage" + TestTxnCommands2.makeValuesClause(data)); runStatementOnDriver("export table Tstage to '" + getWarehouseDir() + "/1'"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java index a00886bb9cd9..7409179f4368 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnNoBuckets.java @@ -19,10 +19,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.conf.Constants; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -41,7 +38,6 @@ 
import org.slf4j.LoggerFactory; import java.io.File; -import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -311,7 +307,7 @@ public void testCtasEmpty() throws Exception { @Test public void testInsertToAcidWithUnionRemove() throws Exception { hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_UNION_REMOVE, true); - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); d.close(); d = new Driver(hiveConf); int[][] values = {{1,2},{3,4},{5,6},{7,8},{9,10}}; @@ -350,7 +346,7 @@ public void testInsertToAcidWithUnionRemove() throws Exception { @Test public void testInsertOverwriteToAcidWithUnionRemove() throws Exception { hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_UNION_REMOVE, true); - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); d.close(); d = new Driver(hiveConf); int[][] values = {{1, 2}, {3, 4}, {5, 6}, {7, 8}, {9, 10}}; @@ -374,9 +370,9 @@ public void testInsertOverwriteToAcidWithUnionRemove() throws Exception { @Test public void testToAcidConversionMultiBucket() throws Exception { //need to disable these so that automatic merge doesn't merge the files - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEMERGEMAPFILES, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEMERGEMAPREDFILES, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEMERGETEZFILES, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_MERGE_MAPFILES, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_MERGE_MAPRED_FILES, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_MERGE_TEZ_FILES, false); d.close(); d = new Driver(hiveConf); @@ -404,7 +400,7 @@ public void testToAcidConversionMultiBucket() throws Exception { //now do Insert from Union here to create data files in sub dirs hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_UNION_REMOVE, true); - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); d.close(); d = new Driver(hiveConf); runStatementOnDriver("insert into T(a,b) select a * 10, b * 10 from " + Table.ACIDTBL + @@ -665,7 +661,7 @@ public void testCtasPartitioned() throws Exception { @Test public void testNonAcidToAcidVectorzied() throws Exception { hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ENABLED, true); - hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); + hiveConf.setVar(HiveConf.ConfVars.HIVE_FETCH_TASK_CONVERSION, "none"); //this enables vectorization of ROW__ID hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_VECTORIZATION_ROW_IDENTIFIER_ENABLED, true);//HIVE-12631 runStatementOnDriver("drop table if exists T"); @@ -773,7 +769,7 @@ private void checkExpected(List rs, String[][] expected, String msg) { */ @Test public void testCompactStatsGather() throws Exception { - hiveConf.setIntVar(HiveConf.ConfVars.HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD, -1); + hiveConf.setIntVar(HiveConf.ConfVars.HIVE_OPT_SORT_DYNAMIC_PARTITION_THRESHOLD, -1); runStatementOnDriver("drop table if exists T"); runStatementOnDriver("create table T(a int, b int) partitioned by (p int, q int) " + "stored as orc TBLPROPERTIES ('transactional'='true')"); @@ -916,7 +912,7 @@ public void testEmptyCompactionResult() throws Exception { */ @Test public void testGetPartitionsNoSession() throws Exception { - hiveConf.setIntVar(HiveConf.ConfVars.HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD, -1); + 
hiveConf.setIntVar(HiveConf.ConfVars.HIVE_OPT_SORT_DYNAMIC_PARTITION_THRESHOLD, -1); runStatementOnDriver("drop table if exists T"); runStatementOnDriver("create table T(a int, b int) partitioned by (p int, q int) " + "stored as orc TBLPROPERTIES ('transactional'='true')"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java b/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java index 03c4d4f23da3..a2dd2d5c739b 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java @@ -119,23 +119,23 @@ void setUpInternal() throws Exception { + File.separator + "mapred" + File.separator + "staging"); hiveConf.set("mapred.temp.dir", workDir + File.separator + this.getClass().getSimpleName() + File.separator + "mapred" + File.separator + "temp"); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, getWarehouseDir()); - hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, getWarehouseDir()); + hiveConf.setVar(HiveConf.ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); hiveConf.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, true); HiveConf.setBoolVar(hiveConf, HiveConf.ConfVars.SPLIT_UPDATE, true); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, false); hiveConf.setBoolean("mapred.input.dir.recursive", true); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_INITIATOR_ON, true); MetastoreConf.setBoolVar(hiveConf, MetastoreConf.ConfVars.COMPACTOR_CLEANER_ON, true); TestTxnDbUtil.setConfValues(hiveConf); - txnHandler = TxnUtils.getTxnStore(hiveConf); TestTxnDbUtil.prepDb(hiveConf); + txnHandler = TxnUtils.getTxnStore(hiveConf); File f = new File(getWarehouseDir()); if (f.exists()) { FileUtil.fullyDelete(f); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/PartitionUtil.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/PartitionUtil.java index f449c79760fd..59179ebb51c0 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/PartitionUtil.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/PartitionUtil.java @@ -18,19 +18,25 @@ package org.apache.hadoop.hive.ql.exec; +import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.CheckResult; import org.apache.hadoop.hive.metastore.IMetaStoreClient; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; import org.apache.hadoop.hive.ql.stats.StatsUtils; import org.apache.hadoop.mapred.TextInputFormat; import org.apache.hadoop.util.StringUtils; 
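    // Illustrative sketch, not a hunk of this patch: the setup order after the change above in
    // TxnCommandsBaseForTests.setUpInternal(). The assumption is that the TxnStore handle should
    // only be obtained once TestTxnDbUtil.prepDb has initialized the transaction database.
    TestTxnDbUtil.setConfValues(hiveConf);
    TestTxnDbUtil.prepDb(hiveConf);
    txnHandler = TxnUtils.getTxnStore(hiveConf);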
import java.util.Arrays; +import java.util.ArrayList; import java.util.HashSet; +import java.util.List; import java.util.Set; import static org.junit.Assert.fail; @@ -82,4 +88,26 @@ public static Set createPartsNotInMs(int numOfParts } return partsNotInMs; } + + + public static void addPartitions(IMetaStoreClient db, String dbName, String tableName, String location, + HiveConf hiveConf, int numPartitions) throws Exception { + List partitions = new ArrayList<>(); + for (int i = 0; i < numPartitions; i++) { + partitions.add(buildPartition(dbName, tableName, String.valueOf(i), location + "/city=" + i, hiveConf)); + } + db.add_partitions(partitions, true, true); + } + + protected static Partition buildPartition(String dbName, String tableName, String value, + String location, HiveConf hiveConf) throws MetaException { + return new PartitionBuilder() + .setDbName(dbName) + .setTableName(tableName) + .addValue(value) + .addCol("test_id", "int", "test col id") + .addCol("test_value", "string", "test col value") + .setLocation(location) + .build(hiveConf); + } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestConcurrentDppInserts.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestConcurrentDppInserts.java index 1288c10d32ec..8595b25bc670 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestConcurrentDppInserts.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestConcurrentDppInserts.java @@ -34,6 +34,7 @@ import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.ClassRule; +import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TestRule; @@ -42,6 +43,7 @@ import com.google.common.base.Joiner; +@Ignore public class TestConcurrentDppInserts { static final private Logger LOG = LoggerFactory.getLogger(TestConcurrentDppInserts.class.getName()); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java index 801133d85c61..00a58c4cea6d 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java @@ -89,8 +89,8 @@ public class TestExecDriver { try { queryState = new QueryState.Builder().withHiveConf(new HiveConf(ExecDriver.class)).build(); conf = queryState.getConf(); - conf.setBoolVar(HiveConf.ConfVars.SUBMITVIACHILD, true); - conf.setBoolVar(HiveConf.ConfVars.SUBMITLOCALTASKVIACHILD, true); + conf.setBoolVar(HiveConf.ConfVars.SUBMIT_VIA_CHILD, true); + conf.setBoolVar(HiveConf.ConfVars.SUBMIT_LOCAL_TASK_VIA_CHILD, true); conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExplainTask.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExplainTask.java index 2ea15cf4924f..099c12bd686c 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExplainTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestExplainTask.java @@ -210,7 +210,7 @@ public void testGetJSONDependenciesJsonShhouldMatch() throws Exception { @Test public void testGetJSONPlan() throws Exception { - uut.conf.setVar(HiveConf.ConfVars.HIVESTAGEIDREARRANGE, "EXECUTION"); + uut.conf.setVar(HiveConf.ConfVars.HIVE_STAGE_ID_REARRANGE, "EXECUTION"); Task mockTask = mockTask(); when(mockTask.getId()).thenReturn("mockTaskId"); ExplainWork explainWorkMock = mockExplainWork(); diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java index eae7f69fbca0..ce6a495a37ea 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestFileSinkOperator.java @@ -313,7 +313,7 @@ public void setup() throws Exception { TFSOStatsPublisher.class.getName()); jc.set(HiveConf.ConfVars.HIVE_STATS_DEFAULT_AGGREGATOR.varname, TFSOStatsAggregator.class.getName()); - jc.set(HiveConf.ConfVars.HIVESTATSDBCLASS.varname, "custom"); + jc.set(HiveConf.ConfVars.HIVE_STATS_DBCLASS.varname, "custom"); } @After diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestGetPartitionAuthWithBatches.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestGetPartitionAuthWithBatches.java new file mode 100644 index 000000000000..191d211d4b72 --- /dev/null +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestGetPartitionAuthWithBatches.java @@ -0,0 +1,300 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.exec; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; + +import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthRequest; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.junit.Assert; +import org.junit.After; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; +import org.mockito.ArgumentCaptor; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + +public class TestGetPartitionAuthWithBatches { + + private final String catName = "hive"; + private final String dbName = "default"; + private final String tableName = "test_partition_batch_with_auth"; + private static HiveConf hiveConf; + private static HiveMetaStoreClient msc; + private static Hive hive; + private Table table; + private final static int NUM_PARTITIONS = 30; + private final static int DECAYING_FACTOR = 2; + private final static int MAX_RETRIES = 0; + private final static boolean IS_AUTH_REQUIRED = true; + private final static String USER_NAME = "username"; + private final static List GROUP_NAMES = Arrays.asList("Grp1", "Grp2"); + private final static Map PARTIAL_PARTITION_SPEC = null; + + @BeforeClass + public static void setupClass() throws HiveException { + hiveConf = new HiveConf(TestGetPartitionAuthWithBatches.class); + hiveConf.set("hive.security.authorization.enabled", "true"); + hiveConf.set("hive.security.authorization.manager","org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider"); + hive = Hive.get(); + SessionState.start(hiveConf); + try { + msc = new HiveMetaStoreClient(hiveConf); + } catch (MetaException e) { + throw new HiveException(e); + } + } + + @Before + public void before() throws Exception { + PartitionUtil.createPartitionedTable(msc, catName, dbName, tableName); + table = msc.getTable(catName, dbName, tableName); + PartitionUtil.addPartitions(msc, dbName, tableName, table.getSd().getLocation(), hiveConf, NUM_PARTITIONS); + } + + @After + public void after() throws Exception { + PartitionUtil.cleanUpTableQuietly(msc, catName, dbName, tableName); + } + + @Test + public void testNumberOfPartitionsRetrieved() throws HiveException { + List numParts = hive.getPartitionNames(dbName, tableName, (short)-1); + Assert.assertEquals(numParts.size(), NUM_PARTITIONS); + List partitions = hive.getPartitionsAuthByNames(new org.apache.hadoop.hive.ql.metadata.Table(table), + numParts.subList(0,5), USER_NAME, GROUP_NAMES); + Assert.assertEquals(partitions.size(), 5); + } + + /** + * Tests the number of partitions recieved from the HMS + * + * @throws Exception + */ + @Test + public void testGetPartitionsAPI() throws Exception { + List part = hive.getPartitions(hive.getTable(dbName, tableName)); + Assert.assertEquals(part.size(), NUM_PARTITIONS); + } + + @Test + public void testGetPartitionsAPI2() throws Exception { + List part = 
hive.getPartitions(hive.getTable(dbName, tableName), + new HashMap() , (short) -1); + Assert.assertEquals(part.size(), NUM_PARTITIONS); + } + + @Test + public void testGetPartitionsAPI2limit() throws Exception { + List part = hive.getPartitions(hive.getTable(dbName, tableName), + new HashMap() , (short) 1); + Assert.assertEquals(part.size(), 1); + + List part1 = hive.getPartitions(hive.getTable(dbName, tableName), + new HashMap() , (short) 10); + Assert.assertEquals(part1.size(), 10); + } + + /** + * Tests the number of times Hive.getPartitions calls are executed with total number of + * partitions to be added are equally divisible by batch size + * + * @throws Exception + */ + @Test + public void testNumberOfGetPartitionCalls() throws Exception { + HiveMetaStoreClient spyMSC = spy(msc); + hive.setMSC(spyMSC); + // test with a batch size of 10 and decaying factor of 2 + int batchSize = 10; + hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName), batchSize, DECAYING_FACTOR, MAX_RETRIES, + PARTIAL_PARTITION_SPEC, IS_AUTH_REQUIRED, USER_NAME, GROUP_NAMES); + ArgumentCaptor req = ArgumentCaptor.forClass(GetPartitionsPsWithAuthRequest.class); + // there should be 3 calls to get partitions + verify(spyMSC, times(3)).listPartitionsWithAuthInfoRequest(req.capture()); + req.getAllValues().forEach(part-> Assert.assertEquals(part.getPartNames().size(),10)); + } + + @Test + public void testNumberOfGetPartitionCalls2() throws Exception { + HiveMetaStoreClient spyMSC = spy(msc); + hive.setMSC(spyMSC); + // test with a batch size of 10 and decaying factor of 2 + int batchSize = 10; + hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName), batchSize, DECAYING_FACTOR, MAX_RETRIES, + new HashMap(), IS_AUTH_REQUIRED, USER_NAME, GROUP_NAMES); + ArgumentCaptor req = ArgumentCaptor.forClass(GetPartitionsPsWithAuthRequest.class); + // there should be 3 calls to get partitions + verify(spyMSC, times(3)).listPartitionsWithAuthInfoRequest(req.capture()); + req.getAllValues().forEach(part-> Assert.assertEquals(part.getPartNames().size(), 10)); + } + + /** + * Tests the number of times Hive.getAllPartitionsOf calls are executed with total number of + * partitions to be added are not exactly divisible by batch size + * + * @throws Exception + */ + @Test + public void testUnevenNumberOfGetPartitionCalls() throws Exception { + HiveMetaStoreClient spyMSC = spy(msc); + hive.setMSC(spyMSC); + // there should be 2 calls to get partitions with batch sizes of 19, 11 + int batchSize = 19; + hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName),batchSize, DECAYING_FACTOR, MAX_RETRIES, + PARTIAL_PARTITION_SPEC, IS_AUTH_REQUIRED, USER_NAME, GROUP_NAMES); + ArgumentCaptor req = ArgumentCaptor.forClass(GetPartitionsPsWithAuthRequest.class); + // there should be 2 calls to get partitions + verify(spyMSC, times(2)).listPartitionsWithAuthInfoRequest(req.capture()); + // confirm the batch sizes were 19, 11 in the two calls to get partitions + List apds = req.getAllValues(); + Assert.assertEquals(19, apds.get(0).getPartNames().size()); + Assert.assertEquals(11, apds.get(1).getPartNames().size()); + } + + /** + * Tests the number of times Hive.getAllPartitionsOf calls are executed with total number of + * partitions to is less than batch size + * + * @throws Exception + */ + @Test + public void testSmallNumberOfPartitions() throws Exception { + HiveMetaStoreClient spyMSC = spy(msc); + hive.setMSC(spyMSC); + int batchSize = 100; + hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName),batchSize, 
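    // Worked example of the call counts asserted above, assuming the batching walks the 30
    // partition names in fixed-size slices (self-contained arithmetic, not Hive API code):
    //   batchSize 10  -> slices of 10, 10, 10 -> 3 HMS calls
    //   batchSize 19  -> slices of 19, 11     -> 2 HMS calls
    //   batchSize 100 -> a single slice of 30 -> 1 HMS call
    int numPartitions = 30;
    for (int batchSize : new int[] {10, 19, 100}) {
      int calls = (numPartitions + batchSize - 1) / batchSize;  // ceiling division
      System.out.println("batch size " + batchSize + " -> " + calls + " calls");
    }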
DECAYING_FACTOR, MAX_RETRIES, + PARTIAL_PARTITION_SPEC, IS_AUTH_REQUIRED, USER_NAME, GROUP_NAMES); + ArgumentCaptor req = ArgumentCaptor.forClass(GetPartitionsPsWithAuthRequest.class); + // there should be 1 call to get partitions + verify(spyMSC, times(1)).listPartitionsWithAuthInfoRequest(req.capture()); + Assert.assertEquals(NUM_PARTITIONS, req.getValue().getPartNames().size()); + } + + /** + * Tests the retries exhausted case when getAllPartitionsOf method call always keep throwing + * HiveException. The batch sizes should exponentially decreased based on the decaying factor and + * ultimately give up when it reaches 0 + * + * @throws Exception + */ + @Test + public void testRetriesExhaustedBatchSize() throws Exception { + HiveMetaStoreClient spyMSC = spy(msc); + hive.setMSC(spyMSC); + doThrow(MetaException.class).when(spyMSC).listPartitionsWithAuthInfoRequest(any()); + try { + hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName), NUM_PARTITIONS, DECAYING_FACTOR, MAX_RETRIES, + PARTIAL_PARTITION_SPEC, IS_AUTH_REQUIRED, USER_NAME, GROUP_NAMES); + } catch (HiveException ignored) {} + ArgumentCaptor req = ArgumentCaptor.forClass(GetPartitionsPsWithAuthRequest.class); + // there should be 5 call to get partitions with batch sizes as 30, 15, 7, 3, 1 + verify(spyMSC, times(5)).listPartitionsWithAuthInfoRequest(req.capture()); + List apds = req.getAllValues(); + Assert.assertEquals(5, apds.size()); + + Assert.assertEquals(30, apds.get(0).getPartNamesSize()); + Assert.assertEquals(15, apds.get(1).getPartNamesSize()); + Assert.assertEquals(7, apds.get(2).getPartNamesSize()); + Assert.assertEquals(3, apds.get(3).getPartNamesSize()); + Assert.assertEquals(1, apds.get(4).getPartNamesSize()); + } + + /** + * Tests the maximum retry attempts provided by configuration + * @throws Exception + */ + @Test + public void testMaxRetriesReached() throws Exception { + HiveMetaStoreClient spyMSC = spy(msc); + hive.setMSC(spyMSC); + doThrow(MetaException.class).when(spyMSC).listPartitionsWithAuthInfoRequest(any()); + int maxRetries = 2; + try { + hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName), NUM_PARTITIONS, DECAYING_FACTOR, maxRetries, + PARTIAL_PARTITION_SPEC, IS_AUTH_REQUIRED, USER_NAME, GROUP_NAMES); + } catch (HiveException ignored) {} + ArgumentCaptor req = ArgumentCaptor.forClass(GetPartitionsPsWithAuthRequest.class); + // there should be 2 call to get partitions with batch sizes as 30, 15 + verify(spyMSC, times(2)).listPartitionsWithAuthInfoRequest(req.capture()); + List apds = req.getAllValues(); + Assert.assertEquals(2, apds.size()); + + Assert.assertEquals(30, apds.get(0).getPartNamesSize()); + Assert.assertEquals(15, apds.get(1).getPartNamesSize()); + } + + /** + * Tests the number of calls to getPartitions and the respective batch sizes when first call to + * getPartitions throws HiveException. The batch size should be reduced by the decayingFactor + * and the second call should fetch all the results + * + * @throws Exception + */ + @Test + public void testBatchingWhenException() throws Exception { + HiveMetaStoreClient spyMSC = spy(msc); + hive.setMSC(spyMSC); + // This will throw exception only the first time. 
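    // Worked example of the retries-exhausted sequence asserted above, assuming the batch size is
    // divided by the decaying factor (2) after every failed attempt and the loop gives up once it
    // reaches 0: 30 -> 15 -> 7 -> 3 -> 1, i.e. five listPartitionsWithAuthInfoRequest calls.
    int batchSize = 30;
    while (batchSize > 0) {
      System.out.println("attempt with batch size " + batchSize);  // prints 30, 15, 7, 3, 1
      batchSize /= 2;  // integer division models the decaying factor of 2
    }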
+ doThrow(new MetaException()).doCallRealMethod() + .when(spyMSC).listPartitionsWithAuthInfoRequest(any()); + + int maxRetries = 5; + hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName), NUM_PARTITIONS, DECAYING_FACTOR, maxRetries, + PARTIAL_PARTITION_SPEC, IS_AUTH_REQUIRED, USER_NAME, GROUP_NAMES); + ArgumentCaptor req = ArgumentCaptor.forClass(GetPartitionsPsWithAuthRequest.class); + // The first call with batch size of 30 will fail, the rest two call will be of size 15 each. Total 3 calls + verify(spyMSC, times(3)).listPartitionsWithAuthInfoRequest(req.capture()); + List apds = req.getAllValues(); + Assert.assertEquals(3, apds.size()); + + Assert.assertEquals(30, apds.get(0).getPartNamesSize()); + Assert.assertEquals(15, apds.get(1).getPartNamesSize()); + Assert.assertEquals(15, apds.get(2).getPartNamesSize()); + + Set partNames = new HashSet<>(apds.get(1).getPartNames()); + partNames.addAll(apds.get(2).getPartNames()); + assert(partNames.size() == NUM_PARTITIONS); + + List partitionNames = hive.getPartitionNames(table.getDbName(),table.getTableName(), (short) -1); + assert(partitionNames.size() == NUM_PARTITIONS); + partitionNames.forEach(partNames::remove); + assert(partitionNames.size() == NUM_PARTITIONS); + // In case any duplicate/incomplete list is given by hive.getAllPartitionsInBatches, the below assertion will fail + assert(partNames.size() == 0); + } +} diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestGetPartitionInBatches.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestGetPartitionInBatches.java index 246afeb79217..d4afff716bf4 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestGetPartitionInBatches.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestGetPartitionInBatches.java @@ -23,11 +23,12 @@ import org.apache.hadoop.hive.metastore.api.GetPartitionsByNamesRequest; import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.MetastoreException; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.PartitionIterable; +import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.session.SessionState; import org.junit.Assert; import org.junit.After; @@ -36,10 +37,10 @@ import org.junit.Test; import org.mockito.ArgumentCaptor; - -import java.util.ArrayList; +import java.util.HashMap; import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Set; import static org.mockito.ArgumentMatchers.any; @@ -57,6 +58,9 @@ public class TestGetPartitionInBatches { private static HiveMetaStoreClient msc; private static Hive hive; private Table table; + private final static int NUM_PARTITIONS = 30; + private final static boolean IS_AUTH_REQUIRED = false; + private final static Map PARTIAL_PARTITION_SPEC = null; @BeforeClass public static void setupClass() throws HiveException { @@ -74,7 +78,7 @@ public static void setupClass() throws HiveException { public void before() throws Exception { PartitionUtil.createPartitionedTable(msc, catName, dbName, tableName); table = msc.getTable(catName, dbName, tableName); - addPartitions(dbName, tableName); + PartitionUtil.addPartitions(msc, dbName, tableName, table.getSd().getLocation(), hiveConf, NUM_PARTITIONS); } @After @@ -82,24 +86,13 @@ 
public void after() throws Exception { PartitionUtil.cleanUpTableQuietly(msc, catName, dbName, tableName); } - private void addPartitions(String dbName, String tableName) throws Exception { - List partitions = new ArrayList<>(); - for (int i = 0; i < 30; i++) { - partitions.add(buildPartition(dbName, tableName, String.valueOf(i), table.getSd().getLocation() + "/city=" + i)); - } - msc.add_partitions(partitions, true, true); - } - - protected Partition buildPartition(String dbName, String tableName, String value, - String location) throws MetaException { - return new PartitionBuilder() - .setDbName(dbName) - .setTableName(tableName) - .addValue(value) - .addCol("test_id", "int", "test col id") - .addCol("test_value", "string", "test col value") - .setLocation(location) - .build(hiveConf); + @Test + public void TestNumberOfPartitionsRetrieved() throws HiveException { + List numParts = hive.getPartitionNames(dbName, tableName, (short)-1); + Assert.assertEquals(numParts.size(), NUM_PARTITIONS); + List partitions = hive.getPartitionsByNames(new org.apache.hadoop.hive.ql.metadata.Table(table), + numParts.subList(0,5), false); + Assert.assertEquals(partitions.size(), 5); } /** @@ -108,9 +101,9 @@ protected Partition buildPartition(String dbName, String tableName, String value * @throws Exception */ @Test - public void testgetAllPartitionsOf() throws Exception { + public void testGetAllPartitionsOf() throws Exception { Set part = hive.getAllPartitionsOf(hive.getTable(dbName, tableName)); - Assert.assertEquals(part.size(), 30); + Assert.assertEquals(part.size(), NUM_PARTITIONS); } /** @@ -124,7 +117,7 @@ public void testNumberOfGetPartitionCalls() throws Exception { HiveMetaStoreClient spyMSC = spy(msc); hive.setMSC(spyMSC); // test with a batch size of 10 and decaying factor of 2 - hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName),10, 2, 0); + hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName),10, 2, 0, PARTIAL_PARTITION_SPEC, IS_AUTH_REQUIRED); ArgumentCaptor req = ArgumentCaptor.forClass(GetPartitionsByNamesRequest.class); // there should be 3 calls to get partitions verify(spyMSC, times(3)).getPartitionsByNames(req.capture()); @@ -142,7 +135,7 @@ public void testUnevenNumberOfGetPartitionCalls() throws Exception { HiveMetaStoreClient spyMSC = spy(msc); hive.setMSC(spyMSC); // there should be 2 calls to get partitions with batch sizes of 19, 11 - hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName),19, 2, 0); + hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName),19, 2, 0, PARTIAL_PARTITION_SPEC, IS_AUTH_REQUIRED); ArgumentCaptor req = ArgumentCaptor.forClass(GetPartitionsByNamesRequest.class); // there should be 2 calls to get partitions verify(spyMSC, times(2)).getPartitionsByNames(req.capture()); @@ -162,7 +155,7 @@ public void testUnevenNumberOfGetPartitionCalls() throws Exception { public void testSmallNumberOfPartitions() throws Exception { HiveMetaStoreClient spyMSC = spy(msc); hive.setMSC(spyMSC); - hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName),100, 2, 0); + hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName),100, 2, 0, PARTIAL_PARTITION_SPEC, IS_AUTH_REQUIRED); ArgumentCaptor req = ArgumentCaptor.forClass(GetPartitionsByNamesRequest.class); // there should be 1 call to get partitions verify(spyMSC, times(1)).getPartitionsByNames(req.capture()); @@ -182,7 +175,7 @@ public void testRetriesExhaustedBatchSize() throws Exception { hive.setMSC(spyMSC); 
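All of these batching tests exercise the same decaying-retry idea: partition names are fetched in batches, and when a metastore call fails the batch size is divided by the decaying factor and the remaining names are retried, until the batch size reaches 0 or the retry limit is hit. The sketch below only illustrates that strategy under those assumptions; it is not Hive's actual implementation, and DecayingBatchFetcher and BatchCall are hypothetical names standing in for Hive.getAllPartitionsInBatches and the metastore request it batches.

import java.util.ArrayList;
import java.util.List;

public class DecayingBatchFetcher {

  /** Hypothetical stand-in for one metastore round trip, e.g. a listPartitions-style request. */
  public interface BatchCall {
    List<String> fetch(List<String> partNames) throws Exception;
  }

  /**
   * Fetches all partitions in batches of at most batchSize. On a failed call the batch size is
   * divided by decayingFactor and the remaining names are retried; gives up when the batch size
   * reaches 0 or, if maxRetries > 0, after maxRetries failed calls (0 is treated as unlimited).
   */
  public static List<String> fetchAll(List<String> allPartNames, int batchSize, int decayingFactor,
      int maxRetries, BatchCall call) throws Exception {
    List<String> result = new ArrayList<>();
    int failures = 0;
    int index = 0;
    int currentBatch = batchSize;
    while (index < allPartNames.size()) {
      int end = Math.min(index + currentBatch, allPartNames.size());
      try {
        result.addAll(call.fetch(allPartNames.subList(index, end)));
        index = end;                     // batch succeeded: advance the window
      } catch (Exception e) {
        failures++;
        currentBatch /= decayingFactor;  // e.g. 30 -> 15 -> 7 -> 3 -> 1 with a factor of 2
        if (currentBatch == 0 || (maxRetries > 0 && failures >= maxRetries)) {
          throw e;                       // retries exhausted
        }
      }
    }
    return result;
  }
}

With a decaying factor of 2 and 30 partitions, this reproduces the call sequences the ArgumentCaptor assertions check for: 10/10/10, 19/11, a single call of 30, 30/15/7/3/1 when every call fails, and 30/15/15 when only the first call fails.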
doThrow(MetaException.class).when(spyMSC).getPartitionsByNames(any()); try { - hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName), 30, 2, 0); + hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName), 30, 2, 0, PARTIAL_PARTITION_SPEC, IS_AUTH_REQUIRED); } catch (Exception ignored) {} ArgumentCaptor req = ArgumentCaptor.forClass(GetPartitionsByNamesRequest.class); // there should be 5 call to get partitions with batch sizes as 30, 15, 7, 3, 1 @@ -207,7 +200,7 @@ public void testMaxRetriesReached() throws Exception { hive.setMSC(spyMSC); doThrow(MetaException.class).when(spyMSC).getPartitionsByNames(any()); try { - hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName), 30, 2, 2); + hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName), 30, 2, 2, PARTIAL_PARTITION_SPEC, IS_AUTH_REQUIRED); } catch (Exception ignored) {} ArgumentCaptor req = ArgumentCaptor.forClass(GetPartitionsByNamesRequest.class); // there should be 2 call to get partitions with batch sizes as 30, 15 @@ -234,7 +227,7 @@ public void testBatchingWhenException() throws Exception { doThrow(new MetaException()).doCallRealMethod() .when(spyMSC).getPartitionsByNames(any()); - hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName), 30, 2, 5); + hive.getAllPartitionsInBatches(hive.getTable(dbName, tableName), 30, 2, 5, PARTIAL_PARTITION_SPEC, IS_AUTH_REQUIRED); ArgumentCaptor req = ArgumentCaptor.forClass(GetPartitionsByNamesRequest.class); // The first call with batch size of 30 will fail, the rest two call will be of size 15 each. Total 3 calls verify(spyMSC, times(3)).getPartitionsByNames(req.capture()); @@ -256,4 +249,23 @@ public void testBatchingWhenException() throws Exception { // In case any duplicate/incomplete list is given by hive.getAllPartitionsInBatches, the below assertion will fail assert(partNames.size() == 0); } + + @Test + public void testBatchingWhenBatchSizeIsZero() throws MetaException { + HiveMetaStoreClient spyMSC = spy(msc); + hive.setMSC(spyMSC); + int batchSize = 0; + try { + new PartitionIterable(hive, hive.getTable(dbName, tableName), null, batchSize); + } catch (HiveException e) { + Assert.assertTrue(e.getMessage().contains("Invalid batch size for partition iterable." + + " Please use a batch size greater than 0")); + } + try { + new org.apache.hadoop.hive.metastore.PartitionIterable(msc, table, batchSize); + } catch (MetastoreException e) { + Assert.assertTrue(e.getMessage().contains("Invalid batch size for partition iterable." 
+ + " Please use a batch size greater than 0")); + } + } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestLimitOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestLimitOperator.java index 681435c65c8b..0c5679b223ec 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestLimitOperator.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestLimitOperator.java @@ -64,7 +64,7 @@ private void testGlobalLimitReachedInDaemonOrContainer(boolean isDaemon, int off } HiveConf conf = new HiveConf(); - HiveConf.setVar(conf, HiveConf.ConfVars.HIVEQUERYID, "query-" + random.nextInt(10000)); + HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID, "query-" + random.nextInt(10000)); HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE, "tez"); conf.set(TezProcessor.HIVE_TEZ_VERTEX_NAME, "Map 1"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java index c82fdf3a1d9a..dcf0483cf057 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java @@ -200,11 +200,11 @@ public void testScriptOperatorEnvVarsProcessing() throws Throwable { hconf.set("name", hugeEnvVar); Map env = new HashMap(); - HiveConf.setBoolVar(hconf, HiveConf.ConfVars.HIVESCRIPTTRUNCATEENV, false); + HiveConf.setBoolVar(hconf, HiveConf.ConfVars.HIVE_SCRIPT_TRUNCATE_ENV, false); scriptOperator.addJobConfToEnvironment(hconf, env); assertEquals(20*1024+1, env.get("name").length()); - HiveConf.setBoolVar(hconf, HiveConf.ConfVars.HIVESCRIPTTRUNCATEENV, true); + HiveConf.setBoolVar(hconf, HiveConf.ConfVars.HIVE_SCRIPT_TRUNCATE_ENV, true); scriptOperator.addJobConfToEnvironment(hconf, env); assertEquals(20*1024, env.get("name").length()); @@ -223,7 +223,7 @@ public void testScriptOperatorBlacklistedEnvVarsProcessing() { Map env = new HashMap(); - HiveConf.setVar(hconf, HiveConf.ConfVars.HIVESCRIPT_ENV_BLACKLIST, "foobar"); + HiveConf.setVar(hconf, HiveConf.ConfVars.HIVE_SCRIPT_ENV_BLACKLIST, "foobar"); hconf.set("foobar", "foobar"); hconf.set("barfoo", "barfoo"); scriptOperator.addJobConfToEnvironment(hconf, env); @@ -423,7 +423,7 @@ public InputSplit[] getSplits(JobConf job, int splits) throws IOException { public void testFetchOperatorContext() throws Exception { HiveConf conf = new HiveConf(); conf.set("hive.support.concurrency", "false"); - conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); + conf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); SessionState.start(conf); @@ -459,7 +459,7 @@ public void testNoConditionalTaskSizeForLlap() { ConvertJoinMapJoin convertJoinMapJoin = new ConvertJoinMapJoin(); long defaultNoConditionalTaskSize = 1024L * 1024L * 1024L; HiveConf hiveConf = new HiveConf(); - hiveConf.setLongVar(HiveConf.ConfVars.HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD, defaultNoConditionalTaskSize); + hiveConf.setLongVar(HiveConf.ConfVars.HIVE_CONVERT_JOIN_NOCONDITIONAL_TASK_THRESHOLD, defaultNoConditionalTaskSize); LlapClusterStateForCompile llapInfo = null; if ("llap".equalsIgnoreCase(hiveConf.getVar(HiveConf.ConfVars.HIVE_EXECUTION_MODE))) { @@ -577,7 +577,7 @@ public void testLlapMemoryOversubscriptionMaxExecutorsPerQueryCalculation() { // 5. 
Configure hive conf and Build group by operator HiveConf hconf = new HiveConf(); - HiveConf.setIntVar(hconf, HiveConf.ConfVars.HIVEGROUPBYMAPINTERVAL, 1); + HiveConf.setIntVar(hconf, HiveConf.ConfVars.HIVE_GROUPBY_MAP_INTERVAL, 1); // 6. test hash aggr without grouping sets System.out.println("---------------Begin to test hash group by without grouping sets-------------"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java index 15106909734e..f87d6c40f17f 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java @@ -36,7 +36,6 @@ import java.io.File; import java.io.IOException; import java.util.ArrayList; -import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; @@ -345,7 +344,7 @@ public void testGetInputPathsWithEmptyPartitions() throws Exception { List inputPaths = new ArrayList<>(); try { - Path scratchDir = new Path(HiveConf.getVar(jobConf, HiveConf.ConfVars.LOCALSCRATCHDIR)); + Path scratchDir = new Path(HiveConf.getVar(jobConf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR)); List inputPaths1 = Utilities.getInputPaths(jobConf, mapWork1, scratchDir, mock(Context.class), false); @@ -414,7 +413,7 @@ public void testGetInputPathsWithMultipleThreadsAndEmptyPartitions() throws Exce try { fs.mkdirs(testTablePath); List inputPaths = Utilities.getInputPaths(jobConf, mapWork, - new Path(HiveConf.getVar(jobConf, HiveConf.ConfVars.LOCALSCRATCHDIR)), mock(Context.class), false); + new Path(HiveConf.getVar(jobConf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR)), mock(Context.class), false); assertEquals(inputPaths.size(), numPartitions); for (int i = 0; i < numPartitions; i++) { @@ -542,7 +541,7 @@ public void testGetInputPathsWithMultipleThreads() throws Exception { private void runTestGetInputPaths(JobConf jobConf, int numOfPartitions) throws Exception { MapWork mapWork = new MapWork(); - Path scratchDir = new Path(HiveConf.getVar(jobConf, HiveConf.ConfVars.LOCALSCRATCHDIR)); + Path scratchDir = new Path(HiveConf.getVar(jobConf, HiveConf.ConfVars.LOCAL_SCRATCH_DIR)); Map> pathToAliasTable = new LinkedHashMap<>(); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/errors/TestTaskLogProcessor.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/errors/TestTaskLogProcessor.java index e95e9c32918d..404ac4a41e07 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/errors/TestTaskLogProcessor.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/errors/TestTaskLogProcessor.java @@ -231,7 +231,7 @@ public void testMapAggrMemErrorHeuristic() throws Exception { String solution = eas.getSolution(); assertNotNull(solution); assertTrue(solution.length() > 0); - String confName = HiveConf.ConfVars.HIVEMAPAGGRHASHMEMORY.toString(); + String confName = HiveConf.ConfVars.HIVE_MAP_AGGR_HASH_MEMORY.toString(); assertTrue(solution.contains(confName)); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/mr/TestMapRedTask.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/mr/TestMapRedTask.java index 40712eb96d48..d4f983c62c03 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/mr/TestMapRedTask.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/mr/TestMapRedTask.java @@ -69,7 +69,7 @@ public void mrTaskSumbitViaChildWithImpersonation() throws IOException, LoginExc QueryState queryState = new QueryState.Builder().build(); HiveConf conf= queryState.getConf(); - 
conf.setBoolVar(HiveConf.ConfVars.SUBMITVIACHILD, true); + conf.setBoolVar(HiveConf.ConfVars.SUBMIT_VIA_CHILD, true); MapredWork mrWork = new MapredWork(); mrWork.setMapWork(Mockito.mock(MapWork.class)); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezOutputCommitter.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezOutputCommitter.java index 01df5bc326d9..29f04dc3ba9d 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezOutputCommitter.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezOutputCommitter.java @@ -122,7 +122,7 @@ private IDriver getDriverWithCommitter(String committerClass) { conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - conf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false); + conf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, false); conf.setInt("tez.am.task.max.failed.attempts", MAX_TASK_ATTEMPTS); conf.set("mapred.output.committer.class", committerClass); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorLimitOperator.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorLimitOperator.java index 74a4ad0b61b9..ef2232c626ab 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorLimitOperator.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/TestVectorLimitOperator.java @@ -87,7 +87,7 @@ private void testGlobalLimitReachedInDaemonOrContainer(boolean isDaemon, int off } HiveConf conf = new HiveConf(); - HiveConf.setVar(conf, HiveConf.ConfVars.HIVEQUERYID, "query-" + random.nextInt(10000)); + HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_QUERY_ID, "query-" + random.nextInt(10000)); HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE, "tez"); conf.set(TezProcessor.HIVE_TEZ_VERTEX_NAME, "Map 1"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveHooks.java b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveHooks.java index f5bc5bab5a25..b6284aff329f 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveHooks.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHiveHooks.java @@ -61,9 +61,9 @@ public static class SemanticAnalysisHook implements HiveSemanticAnalyzerHook { @Test public void testLoadHooksFromConf() throws Exception { HiveConf hiveConf = new HiveConf(); - hiveConf.setVar(HiveConf.ConfVars.PREEXECHOOKS, + hiveConf.setVar(HiveConf.ConfVars.PRE_EXEC_HOOKS, PreExecHook.class.getName() + "," + PreExecHook.class.getName()); - hiveConf.setVar(HiveConf.ConfVars.POSTEXECHOOKS, + hiveConf.setVar(HiveConf.ConfVars.POST_EXEC_HOOKS, PostExecHook.class.getName()); hiveConf.setVar(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK, SemanticAnalysisHook.class.getName()); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java index f1a9a44e1fe5..e0dccc9f5834 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/hooks/TestHooks.java @@ -55,7 +55,7 @@ public void testRedactLogString() throws Exception { HiveConf conf = new HiveConf(TestHooks.class); String str; - HiveConf.setVar(conf, HiveConf.ConfVars.QUERYREDACTORHOOKS, SimpleQueryRedactor.class.getName()); + HiveConf.setVar(conf, HiveConf.ConfVars.QUERY_REDACTOR_HOOKS, SimpleQueryRedactor.class.getName()); str = HookUtils.redactLogString(null, 
null); assertEquals(str, null); @@ -70,7 +70,7 @@ public void testRedactLogString() throws Exception { @Test public void testQueryRedactor() throws Exception { HiveConf conf = new HiveConf(TestHooks.class); - HiveConf.setVar(conf, HiveConf.ConfVars.QUERYREDACTORHOOKS, + HiveConf.setVar(conf, HiveConf.ConfVars.QUERY_REDACTOR_HOOKS, SimpleQueryRedactor.class.getName()); conf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java b/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java index 0adaf0a700eb..a929bb9d820f 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/StorageFormats.java @@ -26,16 +26,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.ql.io.RCFileInputFormat; -import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; -import org.apache.hadoop.hive.ql.io.RCFileStorageFormatDescriptor; -import org.apache.hadoop.hive.ql.io.StorageFormatDescriptor; -import org.apache.hadoop.hive.ql.io.avro.AvroContainerInputFormat; -import org.apache.hadoop.hive.ql.io.avro.AvroContainerOutputFormat; -import org.apache.hadoop.hive.ql.io.orc.OrcSerde; -import org.apache.hadoop.hive.serde2.avro.AvroSerDe; import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe; -import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import static org.junit.Assert.assertTrue; @@ -104,9 +95,9 @@ public static Collection asParameters() { String serdeClass = descriptor.getSerde(); if (serdeClass == null) { if (descriptor instanceof RCFileStorageFormatDescriptor) { - serdeClass = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTRCFILESERDE); + serdeClass = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DEFAULT_RCFILE_SERDE); } else { - serdeClass = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEDEFAULTSERDE); + serdeClass = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_DEFAULT_SERDE); } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java b/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java index 73096b36720b..073b930531b2 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/TestRCFile.java @@ -643,7 +643,7 @@ public void testSync() throws IOException { RCFileInputFormat inputFormat = new RCFileInputFormat(); JobConf jobconf = new JobConf(cloneConf); jobconf.set("mapred.input.dir", testDir.toString()); - HiveConf.setLongVar(jobconf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, fileLen); + HiveConf.setLongVar(jobconf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, fileLen); InputSplit[] splits = inputFormat.getSplits(jobconf, 1); RCFileRecordReader rr = new RCFileRecordReader(jobconf, (FileSplit)splits[0]); long lastSync = 0; @@ -710,7 +710,7 @@ private void writeThenReadByRecordReader(int intervalRecordCount, RCFileInputFormat inputFormat = new RCFileInputFormat(); JobConf jonconf = new JobConf(cloneConf); jonconf.set("mapred.input.dir", testDir.toString()); - HiveConf.setLongVar(jonconf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, minSplitSize); + HiveConf.setLongVar(jonconf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, minSplitSize); InputSplit[] splits = inputFormat.getSplits(jonconf, splitNumber); assertEquals("splits length should be " + splitNumber, splitNumber, splits.length); int readCount = 0; @@ -796,7 +796,7 @@ public void testRCFileHeader(char[] expected, Configuration conf) @Test public void testNonExplicitRCFileHeader() 
throws IOException, SerDeException { Configuration conf = new Configuration(); - conf.setBoolean(HiveConf.ConfVars.HIVEUSEEXPLICITRCFILEHEADER.varname, false); + conf.setBoolean(HiveConf.ConfVars.HIVE_USE_EXPLICIT_RCFILE_HEADER.varname, false); char[] expected = new char[] {'S', 'E', 'Q'}; testRCFileHeader(expected, conf); } @@ -804,7 +804,7 @@ public void testNonExplicitRCFileHeader() throws IOException, SerDeException { @Test public void testExplicitRCFileHeader() throws IOException, SerDeException { Configuration conf = new Configuration(); - conf.setBoolean(HiveConf.ConfVars.HIVEUSEEXPLICITRCFILEHEADER.varname, true); + conf.setBoolean(HiveConf.ConfVars.HIVE_USE_EXPLICIT_RCFILE_HEADER.varname, true); char[] expected = new char[] {'R', 'C', 'F'}; testRCFileHeader(expected, conf); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java index e885c634c054..c986d10ab4cf 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java @@ -1728,8 +1728,8 @@ public void testSplitGenerator() throws Exception { new MockBlock("host0", "host3-2", "host3-3"), new MockBlock("host4-1", "host4-2", "host4-3"), new MockBlock("host5-1", "host5-2", "host5-3"))); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 300); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 200); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, 300); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 200); OrcInputFormat.Context context = new OrcInputFormat.Context(conf); OrcInputFormat.SplitGenerator splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs, @@ -1752,8 +1752,8 @@ public void testSplitGenerator() throws Exception { assertEquals(1800, result.getStart()); assertEquals(200, result.getLength()); // test min = 0, max = 0 generates each stripe - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 0); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 0); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, 0); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 0); context = new OrcInputFormat.Context(conf); splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs, fs.getFileStatus(new Path("/a/file")), null, null, true, @@ -1777,8 +1777,8 @@ public void testProjectedColumnSize() throws Exception { new MockBlock("host0", "host3-2", "host3-3"), new MockBlock("host4-1", "host4-2", "host4-3"), new MockBlock("host5-1", "host5-2", "host5-3"))); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 300); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 200); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, 300); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 200); conf.setBoolean(ColumnProjectionUtils.READ_ALL_COLUMNS, false); conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR, "0"); OrcInputFormat.Context context = new OrcInputFormat.Context(conf); @@ -1802,8 +1802,8 @@ public void testProjectedColumnSize() throws Exception { assertEquals(43792, result.getProjectedColumnsUncompressedSize()); // test min = 0, max = 0 generates each stripe - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 0); - HiveConf.setLongVar(conf, 
HiveConf.ConfVars.MAPREDMINSPLITSIZE, 0); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, 0); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 0); context = new OrcInputFormat.Context(conf); splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs, fs.getFileStatus(new Path("/a/file")), null, null, true, @@ -1822,8 +1822,8 @@ public void testProjectedColumnSize() throws Exception { } // single split - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 1000); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 100000); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, 1000); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 100000); context = new OrcInputFormat.Context(conf); splitter = new OrcInputFormat.SplitGenerator(new OrcInputFormat.SplitInfo(context, fs, fs.getFileStatus(new Path("/a/file")), null, null, true, @@ -3785,13 +3785,13 @@ public void testRowNumberUniquenessInDifferentSplits() throws Exception { // Save the conf variable values so that they can be restored later. long oldDefaultStripeSize = conf.getLong(OrcConf.STRIPE_SIZE.getHiveConfName(), -1L); - long oldMaxSplitSize = conf.getLong(HiveConf.ConfVars.MAPREDMAXSPLITSIZE.varname, -1L); + long oldMaxSplitSize = conf.getLong(HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE.varname, -1L); // Set the conf variable values for this test. long newStripeSize = 10000L; // 10000 bytes per stripe long newMaxSplitSize = 100L; // 1024 bytes per split conf.setLong(OrcConf.STRIPE_SIZE.getHiveConfName(), newStripeSize); - conf.setLong(HiveConf.ConfVars.MAPREDMAXSPLITSIZE.varname, newMaxSplitSize); + conf.setLong(HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE.varname, newMaxSplitSize); AbstractSerDe serde = new OrcSerde(); HiveOutputFormat outFormat = new OrcOutputFormat(); @@ -3838,10 +3838,10 @@ public void testRowNumberUniquenessInDifferentSplits() throws Exception { conf.unset(OrcConf.STRIPE_SIZE.getHiveConfName()); } if (oldMaxSplitSize != -1L) { - conf.setLong(HiveConf.ConfVars.MAPREDMAXSPLITSIZE.varname, oldMaxSplitSize); + conf.setLong(HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE.varname, oldMaxSplitSize); } else { // this means that nothing was set for default stripe size previously, so we should unset it. 
- conf.unset(HiveConf.ConfVars.MAPREDMAXSPLITSIZE.varname); + conf.unset(HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE.varname); } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java index f046191ae47e..0b6d57636d38 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcSplitElimination.java @@ -127,8 +127,8 @@ public void testSplitEliminationSmallMaxSplit() throws Exception { 100000, CompressionKind.NONE, 10000, 10000); writeData(writer); writer.close(); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 1000); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 5000); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 1000); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, 5000); InputFormat in = new OrcInputFormat(); FileInputFormat.setInputPaths(conf, testFilePath.toString()); @@ -197,8 +197,8 @@ public void testSplitEliminationLargeMaxSplit() throws Exception { 100000, CompressionKind.NONE, 10000, 10000); writeData(writer); writer.close(); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 1000); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 150000); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 1000); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, 150000); InputFormat in = new OrcInputFormat(); FileInputFormat.setInputPaths(conf, testFilePath.toString()); @@ -278,8 +278,8 @@ public void testSplitEliminationComplexExpr() throws Exception { 100000, CompressionKind.NONE, 10000, 10000); writeData(writer); writer.close(); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMINSPLITSIZE, 1000); - HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPREDMAXSPLITSIZE, 150000); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE, 1000); + HiveConf.setLongVar(conf, HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE, 150000); InputFormat in = new OrcInputFormat(); FileInputFormat.setInputPaths(conf, testFilePath.toString()); @@ -696,10 +696,10 @@ private static String toString(FileSplit fs) { private void setupExternalCacheConfig(boolean isPpd, String paths) { FileInputFormat.setInputPaths(conf, paths); conf.set(ConfVars.HIVE_ORC_SPLIT_STRATEGY.varname, "ETL"); - conf.setLong(HiveConf.ConfVars.MAPREDMINSPLITSIZE.varname, 1000); - conf.setLong(HiveConf.ConfVars.MAPREDMAXSPLITSIZE.varname, 5000); + conf.setLong(HiveConf.ConfVars.MAPRED_MIN_SPLIT_SIZE.varname, 1000); + conf.setLong(HiveConf.ConfVars.MAPRED_MAX_SPLIT_SIZE.varname, 5000); conf.setBoolean(ConfVars.HIVE_ORC_MS_FOOTER_CACHE_PPD.varname, isPpd); - conf.setBoolean(ConfVars.HIVEOPTINDEXFILTER.varname, isPpd); + conf.setBoolean(ConfVars.HIVE_OPT_INDEX_FILTER.varname, isPpd); } private ObjectInspector createIO() { diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java index b192da437ea7..2c80d3ee19ee 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestVectorizedOrcAcidRowBatchReader.java @@ -383,13 +383,13 @@ public void testDeleteEventFilteringOn2() throws Exception { @Test public void testDeleteEventFilteringOnWithoutIdx2() throws Exception { HiveConf.setBoolVar(conf, 
HiveConf.ConfVars.FILTER_DELETE_EVENTS, true); - HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVETESTMODEACIDKEYIDXSKIP, true); + HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TEST_MODE_ACID_KEY_IDX_SKIP, true); testDeleteEventFiltering2(); } @Test public void testDeleteEventFilteringOnWithoutIdx3() throws Exception { HiveConf.setBoolVar(conf, HiveConf.ConfVars.FILTER_DELETE_EVENTS, true); - HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVETESTMODEACIDKEYIDXSKIP, true); + HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_TEST_MODE_ACID_KEY_IDX_SKIP, true); conf.set("orc.stripe.size", "1000"); testDeleteEventFiltering(); } @@ -398,7 +398,7 @@ private void testDeleteEventFiltering2() throws Exception { boolean filterOn = HiveConf.getBoolVar(conf, HiveConf.ConfVars.FILTER_DELETE_EVENTS); boolean skipKeyIdx = - HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVETESTMODEACIDKEYIDXSKIP); + HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_TEST_MODE_ACID_KEY_IDX_SKIP); int bucket = 1; AcidOutputFormat.Options options = new AcidOutputFormat.Options(conf) .filesystem(fs) diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/ITestDbTxnManager.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/ITestDbTxnManager.java index e6806bdf7332..17328b1281e3 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/ITestDbTxnManager.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/ITestDbTxnManager.java @@ -50,7 +50,7 @@ public static void setupDb() throws Exception { .toLowerCase(); rule = getDatabaseRule(metastoreType).setVerbose(false); - conf.setVar(HiveConf.ConfVars.METASTOREDBTYPE, metastoreType.toUpperCase()); + conf.setVar(HiveConf.ConfVars.METASTORE_DB_TYPE, metastoreType.toUpperCase()); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY, rule.getJdbcUrl()); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CONNECTION_DRIVER, rule.getJdbcDriver()); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java index c2d93e0f95ed..b6f533490ffd 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; -import org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService; +import org.apache.hadoop.hive.metastore.txn.service.AcidHouseKeeperService; import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java index dc20b552c1af..250aa7382046 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager2.java @@ -38,7 +38,8 @@ import org.apache.hadoop.hive.metastore.api.TxnType; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService; +import org.apache.hadoop.hive.metastore.txn.service.CompactionHouseKeeperService; +import org.apache.hadoop.hive.metastore.txn.service.AcidHouseKeeperService; import 
org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.QueryState; import org.apache.hadoop.hive.ql.io.AcidUtils; @@ -502,7 +503,7 @@ public void testMetastoreTablesCleanup() throws Exception { Assert.assertEquals(5, count); // Fail some inserts, so that we have records in TXN_COMPONENTS - conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, true); + conf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, true); driver.run("insert into temp.T10 values (9, 9)"); driver.run("insert into temp.T11 values (10, 10)"); driver.run("insert into temp.T12p partition (ds='today', hour='1') values (11, 11)"); @@ -510,7 +511,7 @@ public void testMetastoreTablesCleanup() throws Exception { count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"TXN_COMPONENTS\" " + "where \"TC_DATABASE\"='temp' and \"TC_TABLE\" in ('t10', 't11', 't12p', 't13p')"); Assert.assertEquals(4, count); - conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEROLLBACKTXN, false); + conf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_ROLLBACK_TXN, false); // Drop a table/partition; corresponding records in TXN_COMPONENTS and COMPLETED_TXN_COMPONENTS should disappear count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"TXN_COMPONENTS\" " + @@ -580,7 +581,7 @@ public void testMetastoreTablesCleanup() throws Exception { // Tables need at least 2 delta files to compact, and minor compaction was just run, so insert driver.run("insert into temp.T11 values (14, 14)"); driver.run("insert into temp.T12p partition (ds='tomorrow', hour='2') values (15, 15)"); - conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true); + conf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, true); driver.run("alter table temp.T11 compact 'major'"); count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPACTION_QUEUE\" " + "where \"CQ_DATABASE\"='temp' and \"CQ_TABLE\"='t11' and \"CQ_STATE\"='i' and \"CQ_TYPE\"='a'"); @@ -606,7 +607,7 @@ public void testMetastoreTablesCleanup() throws Exception { count = TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_COMPACTIONS\" " + "where \"CC_DATABASE\"='temp' and \"CC_TABLE\"='t12p' and \"CC_STATE\"='f' and \"CC_TYPE\"='a'"); Assert.assertEquals(1, count); - conf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, false); + conf.setBoolVar(HiveConf.ConfVars.HIVE_TEST_MODE_FAIL_COMPACTION, false); // Put 2 records into COMPACTION_QUEUE and do nothing driver.run("alter table temp.T11 compact 'major'"); @@ -3509,8 +3510,11 @@ public void testRemoveDuplicateCompletedTxnComponents() throws Exception { 5, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_TXN_COMPONENTS\"")); MetastoreTaskThread houseKeeper = new AcidHouseKeeperService(); + MetastoreTaskThread compactionHouseKeeper = new CompactionHouseKeeperService(); houseKeeper.setConf(conf); + compactionHouseKeeper.setConf(conf); houseKeeper.run(); + compactionHouseKeeper.run(); Assert.assertEquals(TestTxnDbUtil.queryToString(conf, "select * from \"COMPLETED_TXN_COMPONENTS\""), 2, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_TXN_COMPONENTS\"")); @@ -3527,6 +3531,7 @@ public void testRemoveDuplicateCompletedTxnComponents() throws Exception { 4, TestTxnDbUtil.countQueryAgent(conf, "select count(*) from \"COMPLETED_TXN_COMPONENTS\"")); houseKeeper.run(); + compactionHouseKeeper.run(); Assert.assertEquals(TestTxnDbUtil.queryToString(conf, "select * from \"COMPLETED_TXN_COMPONENTS\""), 3, TestTxnDbUtil.countQueryAgent(conf, 
"select count(*) from \"COMPLETED_TXN_COMPONENTS\"")); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java index 33171ebb0cb2..bb7f754fc509 100755 --- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java @@ -32,6 +32,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.regex.Pattern; +import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.fs.FileChecksum; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -51,7 +52,10 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.events.InsertEvent; +import org.apache.hadoop.hive.ql.ddl.table.partition.PartitionUtils; +import org.apache.hadoop.hive.ql.exec.SerializationUtilities; import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; @@ -682,6 +686,45 @@ public void testDropPartitionsWithPurge() throws Exception { } } + @Test + public void testDropMissingPartitionsByFilter() throws Throwable { + String dbName = Warehouse.DEFAULT_DATABASE_NAME; + String tableName = "table_for_testDropMissingPartitionsByFilter"; + + Table table = createPartitionedTable(dbName, tableName); + for (int i = 10; i <= 12; i++) { + Map partitionSpec = new ImmutableMap.Builder() + .put("ds", "20231129") + .put("hr", String.valueOf(i)) + .build(); + hm.createPartition(table, partitionSpec); + } + + List partitions = hm.getPartitions(table); + assertEquals(3, partitions.size()); + + // drop partitions by filter with missing predicate + try { + List> partExprs = new ArrayList<>(); + ExprNodeColumnDesc column = new ExprNodeColumnDesc( + TypeInfoFactory.stringTypeInfo, "ds", null, true); + List values = Arrays.asList("20231130", "20231129"); + for (int i = 0; i < values.size(); i++) { + ExprNodeGenericFuncDesc expr = PartitionUtils.makeBinaryPredicate( + "=", column, new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, values.get(i))); + partExprs.add(Pair.of(i, SerializationUtilities.serializeObjectWithTypeInformation(expr))); + } + hm.dropPartitions(dbName, tableName, partExprs, PartitionDropOptions.instance()); + fail("Expected exception"); + } catch (HiveException e) { + // expected + assertEquals("Some partitions to drop are missing", e.getCause().getMessage()); + assertEquals(3, hm.getPartitions(table).size()); + } finally { + cleanUpTableQuietly(dbName, tableName); + } + } + /** * Test that tables set up with auto-purge skip trash-directory when tables/partitions are dropped. 
* @throws Throwable @@ -943,8 +986,8 @@ public void testHiveRefreshOnConfChange() throws Throwable{ prevHiveObj.getDatabaseCurrent(); //change value of a metavar config param in new hive conf newHconf = new HiveConf(hiveConf); - newHconf.setIntVar(ConfVars.METASTORETHRIFTCONNECTIONRETRIES, - newHconf.getIntVar(ConfVars.METASTORETHRIFTCONNECTIONRETRIES) + 1); + newHconf.setIntVar(ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES, + newHconf.getIntVar(ConfVars.METASTORE_THRIFT_CONNECTION_RETRIES) + 1); newHiveObj = Hive.get(newHconf); assertTrue(prevHiveObj != newHiveObj); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestMSCKRepairOnAcid.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestMSCKRepairOnAcid.java index d57323e6549e..b1798597abde 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestMSCKRepairOnAcid.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestMSCKRepairOnAcid.java @@ -507,4 +507,19 @@ public void testNonPartitionedTable() throws Exception { runStatementOnDriver("drop table if exists " + acidTblMsck); } + + @Test + public void testInvalidPartitionNameErrorMessage() throws Exception { + runStatementOnDriver("drop table if exists " + acidTblPartMsck); + runStatementOnDriver("create table " + acidTblPartMsck + + " (a int, b int) partitioned by (p string) clustered by (a) into 2 buckets" + + " stored as orc TBLPROPERTIES ('transactional'='true')"); + FileSystem fs = FileSystem.get(hiveConf); + fs.mkdirs(new Path(getWarehouseDir(), acidTblPartMsck+ "/part")); + try { + runStatementOnDriver("msck repair table " + acidTblPartMsck); + } catch (Exception e){ + Assert.assertEquals("Error message did not match",true,e.getMessage().contains("Invalid partition name")); + } + } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestMaterializedViewsCache.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestMaterializedViewsCache.java index 34e85b471951..6978fd10004d 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestMaterializedViewsCache.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestMaterializedViewsCache.java @@ -157,7 +157,7 @@ private Table getTable(String db, String tableName, String definition) { private static HiveRelOptMaterialization createMaterialization(Table table) throws ParseException { return new HiveRelOptMaterialization( new DummyRel(table), new DummyRel(table), null, asList(table.getDbName(), table.getTableName()), - EnumSet.allOf(HiveRelOptMaterialization.RewriteAlgorithm.class), + RewriteAlgorithm.ALL, HiveRelOptMaterialization.IncrementalRebuildMode.AVAILABLE, ParseUtils.parse(table.getViewExpandedText(), null)); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java index 1c26899ce21e..09d9dc2f5327 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestSessionHiveMetastoreClientListPartitionsTempTable.java @@ -260,7 +260,7 @@ private void checkPartitionNames(List expected, short numParts, String o @Test public void testListPartitionNames() throws Exception { Table t = createTable4PartColsParts(getClient()).table; - String defaultPartitionName = HiveConf.getVar(conf, HiveConf.ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = HiveConf.getVar(conf, 
HiveConf.ConfVars.DEFAULT_PARTITION_NAME); List> testValues = Lists.newArrayList( Lists.newArrayList("1999", defaultPartitionName, "02"), Lists.newArrayList(defaultPartitionName, "02", "10"), diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestHiveAugmentMaterializationRule.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestHiveAugmentMaterializationRule.java new file mode 100644 index 000000000000..2a850cfc5365 --- /dev/null +++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestHiveAugmentMaterializationRule.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.calcite.rules.views; + +import org.apache.calcite.plan.RelOptRule; +import org.apache.calcite.rel.RelNode; +import org.apache.hadoop.hive.common.ValidReaderWriteIdList; +import org.apache.hadoop.hive.common.ValidTxnWriteIdList; +import org.apache.hadoop.hive.common.ValidWriteIdList; +import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveFilter; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +import java.util.BitSet; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.core.Is.is; +import static org.hamcrest.core.IsInstanceOf.instanceOf; +import static org.mockito.Mockito.doReturn; + +@RunWith(MockitoJUnitRunner.class) +public class TestHiveAugmentMaterializationRule extends TestRuleBase { + @Test + public void testFilterIsCreatedInTopOfTSWhenTableHasChangesSinceSavedSnapshot() { + RelNode tableScan = createTS(t1NativeMock, "t1"); + + ValidTxnWriteIdList current = new ValidTxnWriteIdList(10L); + ValidWriteIdList validWriteIdList = new ValidReaderWriteIdList("default.t1", new long[0], new BitSet(), 10L); + current.addTableValidWriteIdList(validWriteIdList); + + ValidTxnWriteIdList mv = new ValidTxnWriteIdList(5L); + validWriteIdList = new ValidReaderWriteIdList("default.t1", new long[] {4, 6}, new BitSet(), 5L); + mv.addTableValidWriteIdList(validWriteIdList); + + RelOptRule rule = new HiveAugmentMaterializationRule(REX_BUILDER, current, mv); + + RelNode newRoot = HiveMaterializedViewUtils.applyRule(tableScan, rule); + + assertThat(newRoot, instanceOf(HiveFilter.class)); + HiveFilter filter = (HiveFilter) newRoot; + assertThat(filter.getCondition().toString(), is("AND(<=($3.writeId, 5), <>($3.writeId, 4), <>($3.writeId, 6))")); + } + +} \ No newline at end of file diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestHiveAugmentSnapshotMaterializationRule.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestHiveAugmentSnapshotMaterializationRule.java index 
746343d81459..266f5dca92c1 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestHiveAugmentSnapshotMaterializationRule.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestHiveAugmentSnapshotMaterializationRule.java @@ -39,7 +39,7 @@ public class TestHiveAugmentSnapshotMaterializationRule extends TestRuleBase { @Test public void testWhenSnapshotAndTableAreEmptyNoFilterAdded() { - RelNode tableScan = createTS(); + RelNode tableScan = createT2IcebergTS(); RelOptRule rule = HiveAugmentSnapshotMaterializationRule.with(Collections.emptyMap()); RelNode newRoot = HiveMaterializedViewUtils.applyRule(tableScan, rule); @@ -50,7 +50,7 @@ public void testWhenSnapshotAndTableAreEmptyNoFilterAdded() { @Test public void testWhenNoSnapshotButTableHasNewDataAFilterWithDefaultSnapshotIDAdded() { doReturn(new SnapshotContext(42)).when(table2storageHandler).getCurrentSnapshotContext(table2); - RelNode tableScan = createTS(); + RelNode tableScan = createT2IcebergTS(); RelOptRule rule = HiveAugmentSnapshotMaterializationRule.with(Collections.emptyMap()); RelNode newRoot = HiveMaterializedViewUtils.applyRule(tableScan, rule); @@ -63,7 +63,7 @@ public void testWhenNoSnapshotButTableHasNewDataAFilterWithDefaultSnapshotIDAdde @Test public void testWhenMVAndTableCurrentSnapshotAreTheSameNoFilterAdded() { doReturn(new SnapshotContext(42)).when(table2storageHandler).getCurrentSnapshotContext(table2); - RelNode tableScan = createTS(); + RelNode tableScan = createT2IcebergTS(); Map mvSnapshot = new HashMap<>(); mvSnapshot.put(table2.getFullyQualifiedName(), new SnapshotContext(42)); RelOptRule rule = HiveAugmentSnapshotMaterializationRule.with(mvSnapshot); @@ -76,7 +76,7 @@ public void testWhenMVAndTableCurrentSnapshotAreTheSameNoFilterAdded() { @Test public void testWhenMVSnapshotIsDifferentThanTableCurrentSnapshotHasNewDataAFilterWithMVSnapshotIdAdded() { doReturn(new SnapshotContext(10)).when(table2storageHandler).getCurrentSnapshotContext(table2); - RelNode tableScan = createTS(); + RelNode tableScan = createT2IcebergTS(); Map mvSnapshot = new HashMap<>(); mvSnapshot.put(table2.getFullyQualifiedName(), new SnapshotContext(42)); RelOptRule rule = HiveAugmentSnapshotMaterializationRule.with(mvSnapshot); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestHivePushdownSnapshotFilterRule.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestHivePushdownSnapshotFilterRule.java index c60d83a8df4f..c9569061563c 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestHivePushdownSnapshotFilterRule.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestHivePushdownSnapshotFilterRule.java @@ -43,7 +43,7 @@ public class TestHivePushdownSnapshotFilterRule extends TestRuleBase { @Test public void testFilterIsRemovedAndVersionIntervalFromIsSetWhenFilterHasSnapshotIdPredicate() { - RelNode tableScan = createTS(); + RelNode tableScan = createT2IcebergTS(); RelBuilder relBuilder = HiveRelFactories.HIVE_BUILDER.create(relOptCluster, schemaMock); RelNode root = relBuilder.push(tableScan) @@ -64,7 +64,7 @@ public void testFilterIsRemovedAndVersionIntervalFromIsSetWhenFilterHasSnapshotI @Test public void testFilterLeftIntactWhenItDoesNotHaveSnapshotIdPredicate() { - RelNode tableScan = createTS(); + RelNode tableScan = createT2IcebergTS(); RelBuilder relBuilder = HiveRelFactories.HIVE_BUILDER.create(relOptCluster, schemaMock); RelNode root = 
relBuilder.push(tableScan) diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestHiveRowIsDeletedPropagator.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestHiveRowIsDeletedPropagator.java new file mode 100644 index 000000000000..042ae8f62479 --- /dev/null +++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestHiveRowIsDeletedPropagator.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.optimizer.calcite.rules.views; + +import org.apache.calcite.plan.RelOptUtil; +import org.apache.calcite.rel.RelNode; +import org.apache.calcite.rel.core.JoinRelType; +import org.apache.calcite.rel.type.RelDataType; +import org.apache.calcite.rex.RexNode; +import org.apache.calcite.sql.fun.SqlStdOperatorTable; +import org.apache.calcite.sql.type.SqlTypeName; +import org.apache.calcite.tools.RelBuilder; +import org.apache.hadoop.hive.ql.metadata.VirtualColumn; +import org.apache.hadoop.hive.ql.optimizer.calcite.HiveRelFactories; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.mockito.junit.MockitoJUnitRunner; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; + +@RunWith(MockitoJUnitRunner.class) +public class TestHiveRowIsDeletedPropagator extends TestRuleBase { + @Test + public void testJoining3TablesAndAllChanged() { + RelNode ts1 = createTS(t1NativeMock, "t1"); + RelNode ts2 = createTS(t2NativeMock, "t2"); + RelNode ts3 = createTS(t3NativeMock, "t3"); + + RelBuilder relBuilder = HiveRelFactories.HIVE_BUILDER.create(relOptCluster, null); + + RexNode joinCondition = REX_BUILDER.makeCall(SqlStdOperatorTable.EQUALS, + REX_BUILDER.makeInputRef(ts1.getRowType().getFieldList().get(0).getType(), 0), + REX_BUILDER.makeInputRef(ts2.getRowType().getFieldList().get(0).getType(), 5)); + RelNode join1 = relBuilder + .push(ts1) + .filter(REX_BUILDER.makeCall(SqlStdOperatorTable.IS_NOT_NULL, REX_BUILDER.makeInputRef(ts1, 0))) + .push(ts2) + .filter(REX_BUILDER.makeCall(SqlStdOperatorTable.IS_NOT_NULL, REX_BUILDER.makeInputRef(ts2, 0))) + .join(JoinRelType.INNER, joinCondition) + .build(); + + RexNode joinCondition2 = REX_BUILDER.makeCall(SqlStdOperatorTable.EQUALS, + REX_BUILDER.makeInputRef(ts3.getRowType().getFieldList().get(0).getType(), 10), + REX_BUILDER.makeInputRef(join1.getRowType().getFieldList().get(5).getType(), 5)); + + RelDataType bigIntType = relBuilder.getTypeFactory().createSqlType(SqlTypeName.BIGINT); + + RexNode writeIdFilter = REX_BUILDER.makeCall(SqlStdOperatorTable.OR, + REX_BUILDER.makeCall(SqlStdOperatorTable.LESS_THAN, REX_BUILDER.makeLiteral(1, bigIntType, false), rowIdFieldAccess(ts1, 3)), + 
REX_BUILDER.makeCall(SqlStdOperatorTable.LESS_THAN, REX_BUILDER.makeLiteral(1, bigIntType, false), rowIdFieldAccess(ts2, 8)), + REX_BUILDER.makeCall(SqlStdOperatorTable.LESS_THAN, REX_BUILDER.makeLiteral(1, bigIntType, false), rowIdFieldAccess(ts3, 13))); + + RelNode root = relBuilder + .push(join1) + .push(ts3) + .filter(REX_BUILDER.makeCall(SqlStdOperatorTable.IS_NOT_NULL, REX_BUILDER.makeInputRef(ts3, 0))) + .join(JoinRelType.INNER, joinCondition2) + .filter(writeIdFilter) + .build(); + +// System.out.println(RelOptUtil.toString(root)); + + HiveRowIsDeletedPropagator propagator = new HiveRowIsDeletedPropagator(relBuilder); + RelNode newRoot = propagator.propagate(root); + + String dump = RelOptUtil.toString(newRoot); + assertThat(dump, is(EXPECTED_testJoining3TablesAndAllChanged)); + } + + private static final String EXPECTED_testJoining3TablesAndAllChanged = + "HiveFilter(condition=[OR(<(1, $3.writeId), <(1, $8.writeId), <(1, $13.writeId))])\n" + + " HiveFilter(condition=[OR(NOT($15), NOT($16))])\n" + + " HiveProject(a=[$0], b=[$1], c=[$2], ROW__ID=[$3], ROW__IS__DELETED=[$4], d=[$5], e=[$6], f=[$7], ROW__ID0=[$8], ROW__IS__DELETED0=[$9], g=[$12], h=[$13], i=[$14], ROW__ID1=[$15], ROW__IS__DELETED1=[$16], _any_deleted=[OR($10, $17)], _any_inserted=[OR($11, $18)])\n" + + " HiveJoin(condition=[=($12, $5)], joinType=[inner], algorithm=[none], cost=[not available])\n" + + " HiveFilter(condition=[OR(NOT($10), NOT($11))])\n" + + " HiveProject(a=[$0], b=[$1], c=[$2], ROW__ID=[$3], ROW__IS__DELETED=[$4], d=[$7], e=[$8], f=[$9], ROW__ID0=[$10], ROW__IS__DELETED0=[$11], _any_deleted=[OR($5, $12)], _any_inserted=[OR($6, $13)])\n" + + " HiveJoin(condition=[=($0, $7)], joinType=[inner], algorithm=[none], cost=[not available])\n" + + " HiveFilter(condition=[IS NOT NULL($0)])\n" + + " HiveProject(a=[$0], b=[$1], c=[$2], ROW__ID=[$3], ROW__IS__DELETED=[$4], _deleted=[AND($4, <(1, $3.writeId))], _inserted=[AND(<(1, $3.writeId), NOT($4))])\n" + + " HiveTableScan(table=[[]], table:alias=[t1])\n" + + " HiveFilter(condition=[IS NOT NULL($0)])\n" + + " HiveProject(d=[$0], e=[$1], f=[$2], ROW__ID=[$3], ROW__IS__DELETED=[$4], _deleted=[AND($4, <(1, $3.writeId))], _inserted=[AND(<(1, $3.writeId), NOT($4))])\n" + + " HiveTableScan(table=[[]], table:alias=[t2])\n" + + " HiveFilter(condition=[IS NOT NULL($0)])\n" + + " HiveProject(g=[$0], h=[$1], i=[$2], ROW__ID=[$3], ROW__IS__DELETED=[$4], _deleted=[AND($4, <(1, $3.writeId))], _inserted=[AND(<(1, $3.writeId), NOT($4))])\n" + + " HiveTableScan(table=[[]], table:alias=[t3])\n"; + + private RexNode rowIdFieldAccess(RelNode tableScan, int posInTarget) { + int rowIDPos = tableScan.getTable().getRowType().getField( + VirtualColumn.ROWID.getName(), false, false).getIndex(); + return REX_BUILDER.makeFieldAccess(REX_BUILDER.makeInputRef( + tableScan.getTable().getRowType().getFieldList().get(rowIDPos).getType(), posInTarget), 0); + } +} \ No newline at end of file diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestRuleBase.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestRuleBase.java index 2d1d8133dd71..f56e57107f9d 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestRuleBase.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/views/TestRuleBase.java @@ -20,33 +20,63 @@ import org.apache.calcite.jdbc.JavaTypeFactoryImpl; import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.plan.RelOptPlanner; +import org.apache.calcite.rel.RelNode; 
import org.apache.calcite.rel.type.RelDataType; import org.apache.calcite.rel.type.RelDataTypeFactory; +import org.apache.calcite.rel.type.RelRecordType; import org.apache.calcite.rex.RexBuilder; import org.apache.calcite.sql.type.SqlTypeName; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; +import org.apache.hadoop.hive.ql.optimizer.calcite.CalciteSemanticException; import org.apache.hadoop.hive.ql.optimizer.calcite.HiveTypeSystemImpl; import org.apache.hadoop.hive.ql.optimizer.calcite.RelOptHiveTable; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveRelNode; import org.apache.hadoop.hive.ql.optimizer.calcite.reloperators.HiveTableScan; +import org.apache.hadoop.hive.ql.optimizer.calcite.translator.TypeConverter; import org.apache.hadoop.hive.ql.parse.CalcitePlanner; import org.junit.Before; import org.junit.BeforeClass; import org.mockito.Mock; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; import java.util.List; +import java.util.Map; import static java.util.Arrays.asList; +import static java.util.Collections.singletonList; import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.lenient; public class TestRuleBase { protected static final RexBuilder REX_BUILDER = new RexBuilder(new JavaTypeFactoryImpl(new HiveTypeSystemImpl())); protected static final RelDataTypeFactory TYPE_FACTORY = REX_BUILDER.getTypeFactory(); protected static RelOptCluster relOptCluster; + + @Mock + protected RelOptHiveTable t1NativeMock; + protected static RelDataType t1NativeType; + protected static Table t1Native; + @Mock + protected static HiveStorageHandler t1NativeStorageHandler; + @Mock + protected RelOptHiveTable t2NativeMock; + protected static RelDataType t2NativeType; + protected static Table t2Native; + @Mock + protected static HiveStorageHandler t2NativeStorageHandler; + @Mock + protected RelOptHiveTable t3NativeMock; + protected static RelDataType t3NativeType; + protected static Table t3Native; + @Mock + protected static HiveStorageHandler t3NativeStorageHandler; + @Mock protected RelOptHiveTable table2Mock; protected static RelDataType table2Type; @@ -58,27 +88,79 @@ public class TestRuleBase { public static void beforeClass() throws Exception { RelOptPlanner planner = CalcitePlanner.createPlanner(new HiveConf()); relOptCluster = RelOptCluster.create(planner, REX_BUILDER); - List t2Schema = asList( - TYPE_FACTORY.createSqlType(SqlTypeName.INTEGER), - TYPE_FACTORY.createSqlType(SqlTypeName.VARCHAR), - TYPE_FACTORY.createSqlType(SqlTypeName.INTEGER), - HiveAugmentSnapshotMaterializationRule.snapshotIdType(TYPE_FACTORY)); - table2Type = TYPE_FACTORY.createStructType(t2Schema, asList("d", "e", "f", VirtualColumn.SNAPSHOT_ID.getName())); - table2 = new Table(); - table2.setTTable(new org.apache.hadoop.hive.metastore.api.Table()); - table2.setDbName("default"); - table2.setTableName("t2"); + + t1Native = createTable("t1"); + t2Native = createTable("t2"); + t3Native = createTable("t3"); + t1NativeType = createTableType(new HashMap() {{ + put("a", SqlTypeName.INTEGER); + put("b", SqlTypeName.VARCHAR); + put("c", SqlTypeName.INTEGER); + }}, asList(VirtualColumn.ROWID, VirtualColumn.ROWISDELETED)); + t2NativeType = createTableType(new HashMap() {{ + put("d", SqlTypeName.INTEGER); + put("e", SqlTypeName.VARCHAR); + put("f", SqlTypeName.INTEGER); + }}, 
asList(VirtualColumn.ROWID, VirtualColumn.ROWISDELETED)); + t3NativeType = createTableType(new HashMap() {{ + put("g", SqlTypeName.INTEGER); + put("h", SqlTypeName.VARCHAR); + put("i", SqlTypeName.INTEGER); + }}, asList(VirtualColumn.ROWID, VirtualColumn.ROWISDELETED)); + + table2 = createTable("t2_iceberg"); + table2Type = createTableType(new HashMap() {{ + put("d", SqlTypeName.INTEGER); + put("e", SqlTypeName.VARCHAR); + put("f", SqlTypeName.INTEGER); + }}, singletonList(VirtualColumn.SNAPSHOT_ID)); + } + + private static Table createTable(String name) { + Table table = new Table(); + table.setTTable(new org.apache.hadoop.hive.metastore.api.Table()); + table.setDbName("default"); + table.setTableName(name); + return table; + } + + private static RelDataType createTableType(Map columns, Collection virtualColumns) + throws CalciteSemanticException { + List schema = new ArrayList<>(columns.size() + virtualColumns.size()); + List columnNames = new ArrayList<>(columns.size() + virtualColumns.size()); + for (Map.Entry column : columns.entrySet()) { + columnNames.add(column.getKey()); + schema.add(TYPE_FACTORY.createTypeWithNullability(TYPE_FACTORY.createSqlType(column.getValue()), true)); + } + for (VirtualColumn virtualColumn : virtualColumns) { + columnNames.add(virtualColumn.getName()); + schema.add(TypeConverter.convert(virtualColumn.getTypeInfo(), TYPE_FACTORY)); + } + return TYPE_FACTORY.createStructType(schema, columnNames); } @Before public void setup() { - doReturn(table2Type).when(table2Mock).getRowType(); - doReturn(table2).when(table2Mock).getHiveTableMD(); + lenient().doReturn(t1NativeType).when(t1NativeMock).getRowType(); + lenient().doReturn(t1Native).when(t1NativeMock).getHiveTableMD(); + + lenient().doReturn(t2NativeType).when(t2NativeMock).getRowType(); + lenient().doReturn(t2Native).when(t2NativeMock).getHiveTableMD(); + + lenient().doReturn(t3NativeType).when(t3NativeMock).getRowType(); + lenient().doReturn(t3Native).when(t3NativeMock).getHiveTableMD(); + + lenient().doReturn(table2Type).when(table2Mock).getRowType(); + lenient().doReturn(table2).when(table2Mock).getHiveTableMD(); table2.setStorageHandler(table2storageHandler); } - protected HiveTableScan createTS() { + protected RelNode createT2IcebergTS() { + return createTS(table2Mock, "t2"); + } + + protected HiveTableScan createTS(RelOptHiveTable table, String alias) { return new HiveTableScan(relOptCluster, relOptCluster.traitSetOf(HiveRelNode.CONVENTION), - table2Mock, "t2", null, false, false); + table, alias, null, false, false); } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TestASTConverter.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TestASTConverter.java index ddea68429f82..3c48447d51e2 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TestASTConverter.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/translator/TestASTConverter.java @@ -36,11 +36,40 @@ import static java.util.Arrays.asList; import static java.util.Collections.singletonList; +import static org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.convertType; import static org.apache.hadoop.hive.ql.optimizer.calcite.translator.ASTConverter.emptyPlan; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.core.Is.is; class TestASTConverter { + + @Test + void testConvertTypeWhenInputIsStruct() { + List fields = asList( + new RelDataTypeFieldImpl("a", 0, new BasicSqlType(new HiveTypeSystemImpl(), 
SqlTypeName.INTEGER)), + new RelDataTypeFieldImpl("b", 1, new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.CHAR, 30)), + new RelDataTypeFieldImpl("c", 2, new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.NULL))); + + RelDataType dataType = new RelRecordType(fields); + + ASTNode tree = convertType(dataType); + assertThat(tree.dump(), is(EXPECTED_STRUCT_TREE)); + } + + private static final String EXPECTED_STRUCT_TREE = "\n" + + "TOK_STRUCT\n" + + " TOK_TABCOLLIST\n" + + " TOK_TABCOL\n" + + " a\n" + + " TOK_INT\n" + + " TOK_TABCOL\n" + + " b\n" + + " TOK_CHAR\n" + + " 30\n" + + " TOK_TABCOL\n" + + " c\n" + + " TOK_NULL\n"; + @Test void testEmptyPlanWhenInputSchemaIsEmpty() { RelRecordType dataType = new RelRecordType(Collections.emptyList()); @@ -54,9 +83,9 @@ void testEmptyPlanWhenInputSchemaIsEmpty() { @Test void testEmptyPlan() { List fields = asList( - new RelDataTypeFieldImpl("a", 0, new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.INTEGER)), - new RelDataTypeFieldImpl("b", 1, new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.CHAR, 30)), - new RelDataTypeFieldImpl("c", 2, new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.NULL))); + new RelDataTypeFieldImpl("a", 0, new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.INTEGER)), + new RelDataTypeFieldImpl("b", 1, new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.CHAR, 30)), + new RelDataTypeFieldImpl("c", 2, new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.NULL))); RelDataType dataType = new RelRecordType(fields); ASTNode tree = emptyPlan(dataType); @@ -65,96 +94,81 @@ void testEmptyPlan() { } private static final String EXPECTED_TREE = "\n" + - "TOK_QUERY\n" + - " TOK_INSERT\n" + - " TOK_DESTINATION\n" + - " TOK_DIR\n" + - " TOK_TMP_FILE\n" + - " TOK_SELECT\n" + - " TOK_SELEXPR\n" + - " TOK_FUNCTION\n" + - " TOK_INT\n" + - " TOK_NULL\n" + - " a\n" + - " TOK_SELEXPR\n" + - " TOK_FUNCTION\n" + - " TOK_CHAR\n" + - " 30\n" + - " TOK_NULL\n" + - " b\n" + - " TOK_SELEXPR\n" + - " TOK_NULL\n" + - " c\n" + - " TOK_LIMIT\n" + - " 0\n" + - " 0\n"; + "TOK_QUERY\n" + + " TOK_INSERT\n" + + " TOK_DESTINATION\n" + + " TOK_DIR\n" + + " TOK_TMP_FILE\n" + + " TOK_SELECT\n" + + " TOK_SELEXPR\n" + + " TOK_FUNCTION\n" + + " TOK_INT\n" + + " TOK_NULL\n" + + " a\n" + + " TOK_SELEXPR\n" + + " TOK_FUNCTION\n" + + " TOK_CHAR\n" + + " 30\n" + + " TOK_NULL\n" + + " b\n" + + " TOK_SELEXPR\n" + + " TOK_NULL\n" + + " c\n" + + " TOK_LIMIT\n" + + " 0\n" + + " 0\n"; @Test - void testEmptyPlanWithComplexTypes() { + void testEmptyPlanWithNestedComplexTypes() { List nestedStructFields = asList( - new RelDataTypeFieldImpl("nf1", 0, new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.INTEGER)), - new RelDataTypeFieldImpl("nf2", 1, new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.CHAR, 30))); + new RelDataTypeFieldImpl("nf1", 0, new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.INTEGER)), + new RelDataTypeFieldImpl("nf2", 1, new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.CHAR, 30))); List structFields = asList( - new RelDataTypeFieldImpl("f1", 0, new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.INTEGER)), - new RelDataTypeFieldImpl("farray", 1, - new ArraySqlType(new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.INTEGER), true)), - new RelDataTypeFieldImpl("fmap", 2, new MapSqlType( - new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.INTEGER), - new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.INTEGER), true)), - new RelDataTypeFieldImpl("fstruct", 3, - new RelRecordType(nestedStructFields))); + 
new RelDataTypeFieldImpl("f1", 0, new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.INTEGER)), + new RelDataTypeFieldImpl("farray", 1, + new ArraySqlType(new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.INTEGER), true)), + new RelDataTypeFieldImpl("fmap", 2, new MapSqlType( + new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.INTEGER), + new BasicSqlType(new HiveTypeSystemImpl(), SqlTypeName.INTEGER), true)), + new RelDataTypeFieldImpl("fstruct", 3, + new RelRecordType(nestedStructFields))); List fields = singletonList(new RelDataTypeFieldImpl("a", 0, new RelRecordType(structFields))); RelDataType dataType = new RelRecordType(fields); - ASTNode tree = emptyPlan(dataType); + ASTNode tree = convertType(dataType); assertThat(tree.dump(), is(EXPECTED_COMPLEX_TREE)); } private static final String EXPECTED_COMPLEX_TREE = "\n" + - "TOK_QUERY\n" + - " TOK_INSERT\n" + - " TOK_DESTINATION\n" + - " TOK_DIR\n" + - " TOK_TMP_FILE\n" + - " TOK_SELECT\n" + - " TOK_SELEXPR\n" + - " TOK_FUNCTION\n" + - " named_struct\n" + - " f1\n" + - " TOK_FUNCTION\n" + - " TOK_INT\n" + - " TOK_NULL\n" + - " farray\n" + - " TOK_FUNCTION\n" + - " array\n" + - " TOK_FUNCTION\n" + - " TOK_INT\n" + - " TOK_NULL\n" + - " fmap\n" + - " TOK_FUNCTION\n" + - " map\n" + - " TOK_FUNCTION\n" + - " TOK_INT\n" + - " TOK_NULL\n" + - " TOK_FUNCTION\n" + - " TOK_INT\n" + - " TOK_NULL\n" + - " fstruct\n" + - " TOK_FUNCTION\n" + - " named_struct\n" + - " nf1\n" + - " TOK_FUNCTION\n" + - " TOK_INT\n" + - " TOK_NULL\n" + - " nf2\n" + - " TOK_FUNCTION\n" + - " TOK_CHAR\n" + - " 30\n" + - " TOK_NULL\n" + - " a\n" + - " TOK_LIMIT\n" + - " 0\n" + - " 0\n"; + "TOK_STRUCT\n" + + " TOK_TABCOLLIST\n" + + " TOK_TABCOL\n" + + " a\n" + + " TOK_STRUCT\n" + + " TOK_TABCOLLIST\n" + + " TOK_TABCOL\n" + + " f1\n" + + " TOK_INT\n" + + " TOK_TABCOL\n" + + " farray\n" + + " TOK_LIST\n" + + " TOK_INT\n" + + " TOK_TABCOL\n" + + " fmap\n" + + " TOK_MAP\n" + + " TOK_INT\n" + + " TOK_INT\n" + + " TOK_TABCOL\n" + + " fstruct\n" + + " TOK_STRUCT\n" + + " TOK_TABCOLLIST\n" + + " TOK_TABCOL\n" + + " nf1\n" + + " TOK_INT\n" + + " TOK_TABCOL\n" + + " nf2\n" + + " TOK_CHAR\n" + + " 30\n"; } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestNullScanTaskDispatcher.java b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestNullScanTaskDispatcher.java index c9fc2a54edd6..07eabd171f55 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestNullScanTaskDispatcher.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/optimizer/physical/TestNullScanTaskDispatcher.java @@ -85,7 +85,7 @@ public class TestNullScanTaskDispatcher { public void setup() { hiveConf = new HiveConf(); hiveConf.set("fs.mock.impl", MockFileSystem.class.getName()); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVEMETADATAONLYQUERIES, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_METADATA_ONLY_QUERIES, true); sessionState = SessionState.start(hiveConf); parseContext = spy(new ParseContext()); context = new Context(hiveConf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestDMLSemanticAnalyzer.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestDMLSemanticAnalyzer.java index ac5795295158..211c80aa662b 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestDMLSemanticAnalyzer.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestDMLSemanticAnalyzer.java @@ -232,7 +232,7 @@ public void setup() throws Exception { conf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, 
"org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); - conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); + conf.setVar(HiveConf.ConfVars.HIVE_MAPRED_MODE, "nonstrict"); conf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"); conf.setBoolVar(HiveConf.ConfVars.HIVE_IN_TEST, true); conf.set(ValidTxnList.VALID_TXNS_KEY, diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java index 842eb1dd78bb..ce4c774c7385 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/repl/metric/TestReplicationMetricCollector.java @@ -49,7 +49,6 @@ import org.junit.runner.RunWith; import org.mockito.MockedStatic; import org.mockito.Mockito; -import org.mockito.Mock; import org.mockito.junit.MockitoJUnitRunner; import java.util.Map; @@ -70,16 +69,17 @@ public class TestReplicationMetricCollector { HiveConf conf; - @Mock private FailoverMetaData fmd; - @Mock private MetricSink metricSinkInstance; - static MockedStatic metricSinkMockedStatic; + MockedStatic metricSinkMockedStatic; @Before public void setup() throws Exception { + fmd = Mockito.mock(FailoverMetaData.class); + metricSinkInstance = Mockito.mock(MetricSink.class); + conf = new HiveConf(); conf.set(Constants.SCHEDULED_QUERY_SCHEDULENAME, "repl"); conf.set(Constants.SCHEDULED_QUERY_EXECUTIONID, "1"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/type/TestExprNodeDescExprFactory.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/type/TestExprNodeDescExprFactory.java new file mode 100644 index 000000000000..b97c2261ee03 --- /dev/null +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/type/TestExprNodeDescExprFactory.java @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.parse.type; + +import junit.framework.TestCase; +import org.apache.hadoop.hive.common.type.HiveDecimal; +import org.apache.hadoop.hive.ql.exec.ColumnInfo; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.Text; + +import java.util.HashMap; + +import static java.util.Arrays.asList; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; + +public class TestExprNodeDescExprFactory extends TestCase { + + public void testToExprWhenColumnIsPrimitive() throws SemanticException { + ExprNodeDescExprFactory exprFactory = new ExprNodeDescExprFactory(); + ColumnInfo columnInfo = new ColumnInfo(); + columnInfo.setTypeName("decimal(3,2)"); + DecimalTypeInfo typeInfo = new DecimalTypeInfo(3, 2); + columnInfo.setObjectinspector(PrimitiveObjectInspectorFactory.getPrimitiveWritableConstantObjectInspector( + typeInfo, new HiveDecimalWritable(HiveDecimal.create(6.4)))); + + ExprNodeDesc exprNodeDesc = exprFactory.toExpr(columnInfo, null, 0); + + assertThat(exprNodeDesc.getExprString(), is("6.4")); + } + + public void testToExprWhenColumnIsPrimitiveNullValue() throws SemanticException { + ExprNodeDescExprFactory exprFactory = new ExprNodeDescExprFactory(); + ColumnInfo columnInfo = new ColumnInfo(); + columnInfo.setTypeName("decimal(3,2)"); + DecimalTypeInfo typeInfo = new DecimalTypeInfo(3, 2); + columnInfo.setObjectinspector(PrimitiveObjectInspectorFactory.getPrimitiveWritableConstantObjectInspector( + typeInfo, null)); + + ExprNodeDesc exprNodeDesc = exprFactory.toExpr(columnInfo, null, 0); + + assertThat(exprNodeDesc.getExprString(), is("null")); + } + + public void testToExprWhenColumnIsList() throws SemanticException { + ExprNodeDescExprFactory exprFactory = new ExprNodeDescExprFactory(); + ColumnInfo columnInfo = new ColumnInfo(); + columnInfo.setTypeName("array"); + DecimalTypeInfo typeInfo = new DecimalTypeInfo(3, 2); + columnInfo.setObjectinspector(ObjectInspectorFactory.getStandardConstantListObjectInspector( + PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(typeInfo), + asList( + new HiveDecimalWritable(HiveDecimal.create(5d)), + new HiveDecimalWritable(HiveDecimal.create(0.4)), + null))); + + ExprNodeDesc exprNodeDesc = exprFactory.toExpr(columnInfo, null, 0); + + assertThat(exprNodeDesc.getExprString(), is("Const array [5, 0.4, null]")); + } + + public void testToExprWhenColumnIsListWithNullValue() throws SemanticException { + ExprNodeDescExprFactory exprFactory = new ExprNodeDescExprFactory(); + ColumnInfo columnInfo = new ColumnInfo(); + columnInfo.setTypeName("array"); + DecimalTypeInfo typeInfo = new DecimalTypeInfo(3, 2); + columnInfo.setObjectinspector(ObjectInspectorFactory.getStandardConstantListObjectInspector( + PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(typeInfo), null)); + + ExprNodeDesc exprNodeDesc = exprFactory.toExpr(columnInfo, null, 0); + + assertThat(exprNodeDesc.getExprString(), is("Const array null")); + } + + public void 
testToExprWhenColumnIsMap() throws SemanticException { + ExprNodeDescExprFactory exprFactory = new ExprNodeDescExprFactory(); + ColumnInfo columnInfo = new ColumnInfo(); + columnInfo.setTypeName("map"); + columnInfo.setObjectinspector(ObjectInspectorFactory.getStandardConstantMapObjectInspector( + PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector( + PrimitiveObjectInspector.PrimitiveCategory.INT), + PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector( + PrimitiveObjectInspector.PrimitiveCategory.STRING), + new HashMap() {{ put(new IntWritable(4), new Text("foo")); put(null, null); }})); + + ExprNodeDesc exprNodeDesc = exprFactory.toExpr(columnInfo, null, 0); + + assertThat(exprNodeDesc.getExprString(), is("Const map {null=null, 4=foo}")); + } + + public void testToExprWhenColumnIsMapWithNullValue() throws SemanticException { + ExprNodeDescExprFactory exprFactory = new ExprNodeDescExprFactory(); + ColumnInfo columnInfo = new ColumnInfo(); + columnInfo.setTypeName("map"); + columnInfo.setObjectinspector(ObjectInspectorFactory.getStandardConstantMapObjectInspector( + PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector( + PrimitiveObjectInspector.PrimitiveCategory.INT), + PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector( + PrimitiveObjectInspector.PrimitiveCategory.STRING), + null)); + + ExprNodeDesc exprNodeDesc = exprFactory.toExpr(columnInfo, null, 0); + + assertThat(exprNodeDesc.getExprString(), is("Const map null")); + } + + public void testToExprWhenColumnIsStruct() throws SemanticException { + ExprNodeDescExprFactory exprFactory = new ExprNodeDescExprFactory(); + ColumnInfo columnInfo = new ColumnInfo(); + columnInfo.setTypeName("struct"); + columnInfo.setObjectinspector(ObjectInspectorFactory.getStandardConstantStructObjectInspector( + asList("f1", "f2"), + asList(PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(PrimitiveObjectInspector.PrimitiveCategory.INT), + PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(PrimitiveObjectInspector.PrimitiveCategory.STRING)), + asList(new IntWritable(4), new Text("foo")))); + + ExprNodeDesc exprNodeDesc = exprFactory.toExpr(columnInfo, null, 0); + + assertThat(exprNodeDesc.getExprString(), is("const struct(4,'foo')")); + } + + public void testToExprWhenColumnIsStructWithNullFields() throws SemanticException { + ExprNodeDescExprFactory exprFactory = new ExprNodeDescExprFactory(); + ColumnInfo columnInfo = new ColumnInfo(); + columnInfo.setTypeName("struct"); + columnInfo.setObjectinspector(ObjectInspectorFactory.getStandardConstantStructObjectInspector( + asList("f1", "f2"), + asList(PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(PrimitiveObjectInspector.PrimitiveCategory.INT), + PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(PrimitiveObjectInspector.PrimitiveCategory.STRING)), + asList(null, null))); + + ExprNodeDesc exprNodeDesc = exprFactory.toExpr(columnInfo, null, 0); + + assertThat(exprNodeDesc.getExprString(), is("const struct(null,null)")); + } + + public void testToExprWhenColumnIsStructWithNullValue() throws SemanticException { + ExprNodeDescExprFactory exprFactory = new ExprNodeDescExprFactory(); + ColumnInfo columnInfo = new ColumnInfo(); + columnInfo.setTypeName("struct"); + columnInfo.setObjectinspector(ObjectInspectorFactory.getStandardConstantStructObjectInspector( + asList("f1", "f2"), + 
asList(PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(PrimitiveObjectInspector.PrimitiveCategory.INT), + PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(PrimitiveObjectInspector.PrimitiveCategory.STRING)), + null)); + + ExprNodeDesc exprNodeDesc = exprFactory.toExpr(columnInfo, null, 0); + + assertThat(exprNodeDesc.getExprString(), is("null")); + } + +} diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java index 780fb2a58e22..56c30c103bb2 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/TestConditionalResolverCommonJoin.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hive.ql.exec.Task; import org.junit.Test; -import java.io.Serializable; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -70,19 +69,19 @@ public void testResolvingDriverAlias() throws Exception { ctx.setAliasToKnownSize(aliasToKnownSize); HiveConf conf = new HiveConf(); - conf.setLongVar(HiveConf.ConfVars.HIVESMALLTABLESFILESIZE, 4096); + conf.setLongVar(HiveConf.ConfVars.HIVE_SMALL_TABLES_FILESIZE, 4096); // alias3 only can be selected Task resolved = resolver.resolveMapJoinTask(ctx, conf); Assert.assertEquals("alias3", resolved.getId()); - conf.setLongVar(HiveConf.ConfVars.HIVESMALLTABLESFILESIZE, 65536); + conf.setLongVar(HiveConf.ConfVars.HIVE_SMALL_TABLES_FILESIZE, 65536); // alias1, alias2, alias3 all can be selected but overriden by biggest one (alias3) resolved = resolver.resolveMapJoinTask(ctx, conf); Assert.assertEquals("alias3", resolved.getId()); - conf.setLongVar(HiveConf.ConfVars.HIVESMALLTABLESFILESIZE, 2048); + conf.setLongVar(HiveConf.ConfVars.HIVE_SMALL_TABLES_FILESIZE, 2048); // not selected resolved = resolver.resolveMapJoinTask(ctx, conf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java index 3a50356cf8f0..66f508e215ac 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestCounterMapping.java @@ -148,7 +148,7 @@ public void testInConversion() throws ParseException, CommandProcessorException "explain select sum(id_uv) from tu where u in (1,2) group by u"; HiveConf conf = env_setup.getTestCtx().hiveConf; - conf.setIntVar(ConfVars.HIVEPOINTLOOKUPOPTIMIZERMIN, 10); + conf.setIntVar(ConfVars.HIVE_POINT_LOOKUP_OPTIMIZER_MIN, 10); IDriver driver = createDriver(); PlanMapper pm = getMapperForQuery(driver, query); @@ -229,7 +229,7 @@ private static IDriver createDriver() { "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); // conf.setVar(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK, CheckInputReadEntityDirect.class.getName()); HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - HiveConf.setVar(conf, HiveConf.ConfVars.POSTEXECHOOKS, OperatorStatsReaderHook.class.getName()); + HiveConf.setVar(conf, HiveConf.ConfVars.POST_EXEC_HOOKS, OperatorStatsReaderHook.class.getName()); SessionState.start(conf); IDriver driver = DriverFactory.newDriver(conf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java index e5fcc3a0d762..dbe7e967f374 100644 --- 
a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestOperatorCmp.java @@ -190,7 +190,7 @@ private boolean compareOperators(Operator opL, Operator opR) { private static IDriver createDriver() { HiveConf conf = env_setup.getTestCtx().hiveConf; - conf.setBoolVar(ConfVars.HIVEOPTPPD, false); + conf.setBoolVar(ConfVars.HIVE_OPT_PPD, false); conf.setBoolVar(ConfVars.HIVE_QUERY_REEXECUTION_ENABLED, true); conf.setBoolVar(ConfVars.HIVE_VECTORIZATION_ENABLED, false); conf.setBoolVar(ConfVars.HIVE_QUERY_REEXECUTION_ALWAYS_COLLECT_OPERATOR_STATS, true); @@ -202,7 +202,7 @@ private static IDriver createDriver() { conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - HiveConf.setVar(conf, HiveConf.ConfVars.POSTEXECHOOKS, OperatorStatsReaderHook.class.getName()); + HiveConf.setVar(conf, HiveConf.ConfVars.POST_EXEC_HOOKS, OperatorStatsReaderHook.class.getName()); SessionState.start(conf); IDriver driver = DriverFactory.newDriver(conf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java index b67385737ef1..43e6a820f020 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/plan/mapping/TestReOptimization.java @@ -303,7 +303,7 @@ private static IDriver createDriver(String strategies) { conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); HiveConf.setBoolVar(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); - HiveConf.setVar(conf, HiveConf.ConfVars.POSTEXECHOOKS, OperatorStatsReaderHook.class.getName()); + HiveConf.setVar(conf, HiveConf.ConfVars.POST_EXEC_HOOKS, OperatorStatsReaderHook.class.getName()); SessionState.start(conf); IDriver driver = DriverFactory.newDriver(conf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/processors/TestSetProcessor.java b/ql/src/test/org/apache/hadoop/hive/ql/processors/TestSetProcessor.java index ddbbef0b0134..8a993686a690 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/processors/TestSetProcessor.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/processors/TestSetProcessor.java @@ -65,13 +65,13 @@ public void setupTest() throws Exception { public void testHiddenConfig() throws Exception { runSetProcessor(""); String output = baos.toString(); - Assert.assertFalse(output.contains(HiveConf.ConfVars.METASTOREPWD.varname + "=")); + Assert.assertFalse(output.contains(HiveConf.ConfVars.METASTORE_PWD.varname + "=")); Assert.assertFalse(output.contains(HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname + "=")); } @Test public void testHiddenConfigSetVarName() throws CommandProcessorException { - runSetProcessor(HiveConf.ConfVars.METASTOREPWD.varname); + runSetProcessor(HiveConf.ConfVars.METASTORE_PWD.varname); String output = baos.toString(); Assert.assertTrue(output.contains("hidden")); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/reexec/TestReExecuteLostAMQueryPlugin.java b/ql/src/test/org/apache/hadoop/hive/ql/reexec/TestReExecuteLostAMQueryPlugin.java new file mode 100644 index 000000000000..1d29f324945a --- /dev/null +++ b/ql/src/test/org/apache/hadoop/hive/ql/reexec/TestReExecuteLostAMQueryPlugin.java 
@@ -0,0 +1,52 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.ql.reexec; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.QueryState; +import org.apache.hadoop.hive.ql.exec.tez.TezRuntimeException; +import org.apache.hadoop.hive.ql.hooks.HookContext; +import org.junit.Assert; +import org.junit.Test; + +public class TestReExecuteLostAMQueryPlugin { + + @Test + public void testRetryOnUnmanagedAmFailure() throws Exception { + testReExecuteWithExceptionMessage("AM record not found (likely died)"); + } + + @Test + public void testRetryOnNoCurrentDAGException() throws Exception { + testReExecuteWithExceptionMessage("No running DAG at present"); + } + + private void testReExecuteWithExceptionMessage(String message) throws Exception { + ReExecuteLostAMQueryPlugin plugin = new ReExecuteLostAMQueryPlugin(); + ReExecuteLostAMQueryPlugin.LocalHook hook = plugin.new LocalHook(); + + HookContext context = new HookContext(null, QueryState.getNewQueryState(new HiveConf(), null), null, null, null, + null, null, null, null, false, null, null); + context.setHookType(HookContext.HookType.ON_FAILURE_HOOK); + context.setException(new TezRuntimeException("dag_0_0", message)); + + hook.run(context); + + Assert.assertTrue(plugin.shouldReExecute(1)); + } +} diff --git a/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java b/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java index a04e5f0227be..0f762da0abf0 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil; import org.apache.hadoop.hive.ql.Driver; import org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.security.UserGroupInformation; @@ -76,7 +77,8 @@ public static void beforeTest() throws Exception { conf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, true); conf.setVar(ConfVars.HIVE_TXN_MANAGER, DbTxnManager.class.getName()); - conf.setVar(ConfVars.HIVEMAPREDMODE, "nonstrict"); + conf.setVar(ConfVars.HIVE_MAPRED_MODE, "nonstrict"); + conf.setVar(ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); TestTxnDbUtil.prepDb(conf); SessionState.start(conf); @@ -145,6 +147,37 @@ 
public void testOwnerType() throws Exception { Assert.assertTrue(containsOwnerType); } + @Test + public void testActionTypeForPartitionedTable() throws Exception { + runCmd("CREATE EXTERNAL TABLE Part (eid int, name int) PARTITIONED BY (position int, dept int, sal int)"); + reset(mockedAuthorizer); + runCmd("insert overwrite table part partition(position=2,DEPT,SAL) select 2,2,2,2"); + Pair<List<HivePrivilegeObject>, List<HivePrivilegeObject>> io = getHivePrivilegeObjectInputs(); + List<HivePrivilegeObject> hpoList = io.getValue(); + Assert.assertFalse(hpoList.isEmpty()); + for (HivePrivilegeObject hpo : hpoList) { + Assert.assertEquals(hpo.getActionType(), HivePrivilegeObject.HivePrivObjectActionType.INSERT_OVERWRITE); + } + } + + /** + * Test to check that only a single instance of Hive Privilege object is created + * during bulk insert into a partitioned table. + */ + @Test + public void testSingleInstanceOfHPOForPartitionedTable() throws Exception { + reset(mockedAuthorizer); + runCmd("insert overwrite table part partition(position=2,DEPT,SAL)" + + " select 2,2,2,2" + + " union all" + + " select 1,2,3,4" + + " union all" + + " select 3,4,5,6"); + Pair<List<HivePrivilegeObject>, List<HivePrivilegeObject>> io = getHivePrivilegeObjectInputs(); + List<HivePrivilegeObject> hpoList = io.getValue(); + Assert.assertEquals(1, hpoList.size()); + } + /** * @return pair with left value as inputs and right value as outputs, * passed in current call to authorizer.checkPrivileges diff --git a/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java b/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java index 3a59ad54c4a0..c91622611954 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerCLI.java @@ -53,7 +53,7 @@ processedConf, new HadoopDefaultAuthenticator(), getCLISessionCtx() // check that hook to disable transforms has not been added assertFalse("Check for transform query disabling hook", - processedConf.getVar(ConfVars.PREEXECHOOKS).contains(DisallowTransformHook.class.getName())); + processedConf.getVar(ConfVars.PRE_EXEC_HOOKS).contains(DisallowTransformHook.class.getName())); // verify that some dummy param can be set processedConf.verifyAndSet("dummy.param", "dummy.val"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerHS2.java b/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerHS2.java index b087d3beab26..1fc3663e75bd 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerHS2.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/sqlstd/TestSQLStdHiveAccessControllerHS2.java @@ -62,7 +62,7 @@ public void testConfigProcessing() throws HiveAuthzPluginException, SecurityExce // check that hook to disable transforms has been added assertTrue("Check for transform query disabling hook", - processedConf.getVar(ConfVars.PREEXECHOOKS).contains(DisallowTransformHook.class.getName())); + processedConf.getVar(ConfVars.PRE_EXEC_HOOKS).contains(DisallowTransformHook.class.getName())); List<String> settableParams = getSettableParams(); verifyParamSettability(settableParams, processedConf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java b/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java
index 4c374e8d418a..9f93f096d382 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/session/TestSessionState.java @@ -24,7 +24,6 @@ import java.io.File; import java.io.IOException; -import java.lang.reflect.Constructor; import java.lang.reflect.Field; import java.lang.reflect.Method; import java.util.Arrays; @@ -205,7 +204,7 @@ private void generateRefreshJarFiles(String version) throws IOException, Interru @Test public void testReloadAuxJars2() { HiveConf conf = new HiveConf(); - HiveConf.setVar(conf, ConfVars.HIVERELOADABLEJARS, hiveReloadPath); + HiveConf.setVar(conf, ConfVars.HIVE_RELOADABLE_JARS, hiveReloadPath); SessionState ss = new SessionState(conf); SessionState.start(ss); @@ -275,7 +274,7 @@ public void testReflectionCleanup() throws Exception { @Test public void testReloadExistingAuxJars2() { HiveConf conf = new HiveConf(); - HiveConf.setVar(conf, ConfVars.HIVERELOADABLEJARS, hiveReloadPath); + HiveConf.setVar(conf, ConfVars.HIVE_RELOADABLE_JARS, hiveReloadPath); SessionState ss = new SessionState(conf); SessionState.start(ss); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java index add1b0b82bca..d37b54437832 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java @@ -74,15 +74,15 @@ String getTestDataDir() { @Before public void setUp() throws Exception { this.hiveConf = new HiveConf(TestStatsUpdaterThread.class); - hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, getTestDataDir()); - hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); + hiveConf.set(HiveConf.ConfVars.PRE_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.POST_EXEC_HOOKS.varname, ""); + hiveConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, getTestDataDir()); + hiveConf.setVar(HiveConf.ConfVars.HIVE_INPUT_FORMAT, HiveInputFormat.class.getName()); hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); // hiveConf.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, true); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, true); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, true); hiveConf.set(MetastoreConf.ConfVars.STATS_AUTO_UPDATE.getVarname(), "all"); TestTxnDbUtil.setConfValues(hiveConf); TestTxnDbUtil.prepDb(hiveConf); @@ -299,7 +299,7 @@ public void testTxnDynamicPartitions() throws Exception { StatsUpdaterThread su = createUpdater(); IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); executeQuery("create table simple_stats (s string) partitioned by (i int)" + " stored as orc " + " TBLPROPERTIES (\"transactional\"=\"true\")"); @@ -326,8 +326,8 @@ public void testExistingOnly() throws Exception { IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf); executeQuery("create table simple_stats (i int, s string)"); - 
hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, false); executeQuery("insert into simple_stats (i, s) values (1, 'test')"); executeQuery("analyze table simple_stats compute statistics for columns i"); verifyStatsUpToDate("simple_stats", Lists.newArrayList("s"), msClient, false); @@ -350,8 +350,8 @@ public void testQueueingWithThreads() throws Exception { StatsUpdaterThread su = createUpdater(); su.startWorkers(); IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, false); executeQuery("create table simple_stats (s string) partitioned by (i int)"); for (int i = 0; i < PART_COUNT; ++i) { executeQuery("insert into simple_stats partition(i='" + i + "') values ('test')"); @@ -378,8 +378,8 @@ public void testAllPartitions() throws Exception { final int PART_COUNT = 3; StatsUpdaterThread su = createUpdater(); IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, false); executeQuery("create table simple_stats (s string) partitioned by (i int)"); for (int i = 0; i < PART_COUNT; ++i) { executeQuery("insert into simple_stats partition(i='" + i + "') values ('test')"); @@ -401,14 +401,14 @@ public void testPartitionSubset() throws Exception { final int NONSTAT_PART_COUNT = 3; StatsUpdaterThread su = createUpdater(); IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, false); executeQuery("create table simple_stats (s string) partitioned by (i int)"); for (int i = 0; i < NONSTAT_PART_COUNT; ++i) { executeQuery("insert into simple_stats partition(i='" + i + "') values ('test')"); } - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, true); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, true); executeQuery("insert into simple_stats partition(i='" + NONSTAT_PART_COUNT + "') values ('test')"); verifyPartStatsUpToDate(NONSTAT_PART_COUNT, 0, msClient, "simple_stats", false); @@ -435,8 +435,8 @@ public void testPartitionSubset() throws Exception { public void testPartitionsWithDifferentColsAll() throws Exception { StatsUpdaterThread su = createUpdater(); IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); + 
hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, false); executeQuery("create table simple_stats (s string, t string, u string) partitioned by (i int)"); executeQuery("insert into simple_stats partition(i=0) values ('test', '0', 'foo')"); executeQuery("insert into simple_stats partition(i=1) values ('test', '1', 'bar')"); @@ -465,8 +465,8 @@ public void testPartitionsWithDifferentColsExistingOnly() throws Exception { hiveConf.set(MetastoreConf.ConfVars.STATS_AUTO_UPDATE.getVarname(), "existing"); StatsUpdaterThread su = createUpdater(); IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, false); executeQuery("create table simple_stats (s string, t string, u string) partitioned by (i int)"); executeQuery("insert into simple_stats partition(i=0) values ('test', '0', 'foo')"); executeQuery("insert into simple_stats partition(i=1) values ('test', '1', 'bar')"); @@ -502,16 +502,16 @@ public void testParallelOps() throws Exception { hiveConf.setInt(MetastoreConf.ConfVars.STATS_AUTO_UPDATE_WORKER_COUNT.getVarname(), 4); StatsUpdaterThread su = createUpdater(); IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, false); executeQuery("create table simple_stats (s string)"); executeQuery("create table simple_stats2 (s string) partitioned by (i int)"); executeQuery("create table simple_stats3 (s string) partitioned by (i int)"); executeQuery("insert into simple_stats values ('test')"); executeQuery("insert into simple_stats2 partition(i=0) values ('test')"); executeQuery("insert into simple_stats3 partition(i=0) values ('test')"); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, true); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, true); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, true); executeQuery("insert into simple_stats3 partition(i=1) values ('test')"); assertTrue(su.runOneIteration()); @@ -526,8 +526,8 @@ public void testParallelOps() throws Exception { assertEquals(3, su.getQueueLength()); // Nothing new added to the queue while analyze runs. // Add another partition without stats. 
- hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, false); executeQuery("insert into simple_stats3 partition(i=2) values ('test')"); assertTrue(su.runOneIteration()); @@ -569,8 +569,8 @@ private void testNoStatsUpdateForReplTable(String tblNamePrefix, String txnPrope executeQuery("alter database " + dbName + " set dbproperties('" + ReplConst.TARGET_OF_REPLICATION + "'='true')"); StatsUpdaterThread su = createUpdater(); IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, false); executeQuery("create table " + tblWOStats + "(i int, s string) " + txnProperty); executeQuery("insert into " + tblWOStats + "(i, s) values (1, 'test')"); @@ -636,8 +636,8 @@ private void testNoStatsUpdateForFailoverDb(String tblNamePrefix, String txnProp String dbName = ss.getCurrentDatabase(); StatsUpdaterThread su = createUpdater(); IMetaStoreClient msClient = new HiveMetaStoreClient(hiveConf); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSAUTOGATHER, false); - hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_AUTOGATHER, false); + hiveConf.setBoolVar(HiveConf.ConfVars.HIVE_STATS_COL_AUTOGATHER, false); executeQuery("create table " + tblWOStats + "(i int, s string) " + txnProperty); executeQuery("insert into " + tblWOStats + "(i, s) values (1, 'test')"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java index ba90d8549d19..38484534b772 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java @@ -61,7 +61,7 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.metrics.AcidMetricService; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils; import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil; import org.apache.hadoop.hive.metastore.txn.TxnStore; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java index 7eb573dedace..d435a07c2dfb 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCleaner.java @@ -39,7 +39,7 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TxnType; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCompactionMetrics.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCompactionMetrics.java index 24c625139ece..b33f8917c74f 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCompactionMetrics.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestCompactionMetrics.java @@ -51,7 +51,8 @@ import org.apache.hadoop.hive.metastore.metrics.AcidMetricService; import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.TxnHandler; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.ThrowingTxnHandler; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; @@ -665,6 +666,7 @@ public void testDBMetrics() throws Exception { Table t = newTable(dbName, tblName, false); MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.TXN_USE_MIN_HISTORY_LEVEL, false); + TxnHandler.ConfVars.setUseMinHistoryLevel(false); long start = System.currentTimeMillis(); burnThroughTransactions(t.getDbName(), t.getTableName(), 24, new HashSet<>(Arrays.asList(22L, 23L, 24L)), null); openTxn(TxnType.REPL_CREATED); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestDeltaFilesMetrics.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestDeltaFilesMetrics.java index 2eb6185be698..8e332fbe455b 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestDeltaFilesMetrics.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestDeltaFilesMetrics.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.metrics.AcidMetricService; import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; import org.junit.After; @@ -52,6 +53,8 @@ public class TestDeltaFilesMetrics extends CompactorTest { + private static AcidMetricService metricService; + private void setUpHiveConf() { MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.METASTORE_DELTAMETRICS_DELTA_NUM_THRESHOLD, 1); MetastoreConf.setLongVar(conf, MetastoreConf.ConfVars.METASTORE_DELTAMETRICS_OBSOLETE_DELTA_NUM_THRESHOLD, 1); @@ -64,11 +67,15 @@ private void setUpHiveConf() { @Override @Before - public void setup() throws Exception { + public synchronized void setup() throws Exception { this.conf = new HiveConf(); setUpHiveConf(); setup(conf); MetricsFactory.init(conf); + if (metricService == null) { + metricService = new AcidMetricService(); + metricService.setConf(conf); + } } @After @@ -127,7 +134,7 @@ public void testDeltaFileMetricPartitionedTable() throws Exception { startInitiator(); - TimeUnit.SECONDS.sleep(2); + metricService.run(); // 2 active deltas // 1 small delta // 0 obsolete deltas @@ -143,7 +150,7 @@ public void testDeltaFileMetricPartitionedTable() throws Exception { startWorker(); - TimeUnit.SECONDS.sleep(2); + metricService.run(); // 0 active deltas // 0 small delta // 2 obsolete deltas @@ -177,7 +184,7 @@ public void testDeltaFileMetricPartitionedTable() throws Exception { HiveConf.setIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_NUM_THRESHOLD, 2); startInitiator(); - 
TimeUnit.SECONDS.sleep(2); + metricService.run(); // 3 active deltas // 2 small deltas // 2 obsolete deltas @@ -193,7 +200,7 @@ public void testDeltaFileMetricPartitionedTable() throws Exception { startCleaner(); - TimeUnit.SECONDS.sleep(2); + metricService.run(); // 3 active deltas // 2 small deltas // 0 obsolete delta @@ -209,7 +216,7 @@ public void testDeltaFileMetricPartitionedTable() throws Exception { startWorker(); - TimeUnit.SECONDS.sleep(2); + metricService.run(); // 1 active delta // 0 small delta // 3 obsolete deltas @@ -225,7 +232,7 @@ public void testDeltaFileMetricPartitionedTable() throws Exception { startCleaner(); - TimeUnit.SECONDS.sleep(2); + metricService.run(); verifyDeltaMetricsMatch( ImmutableMap.of(dbName + "." + tblName + Path.SEPARATOR + partName, 1), MetricsConstants.COMPACTION_NUM_DELTAS); @@ -235,6 +242,7 @@ public void testDeltaFileMetricPartitionedTable() throws Exception { verifyDeltaMetricsMatch( ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS); + ms.dropTable(dbName, tblName); } @Test @@ -283,7 +291,7 @@ public void testDeltaFileMetricMultiPartitionedTable() throws Exception { HiveConf.setFloatVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_DELTA_PCT_THRESHOLD, 0.4f); startInitiator(); - TimeUnit.SECONDS.sleep(2); + metricService.run(); verifyDeltaMetricsMatch( ImmutableMap.of( dbName + "." + tblName + Path.SEPARATOR + part1Name, 2, @@ -306,7 +314,7 @@ public void testDeltaFileMetricMultiPartitionedTable() throws Exception { startWorker(); startWorker(); - TimeUnit.SECONDS.sleep(2); + metricService.run(); verifyDeltaMetricsMatch( ImmutableMap.of( dbName + "." + tblName + Path.SEPARATOR + part1Name, 2, @@ -324,7 +332,7 @@ public void testDeltaFileMetricMultiPartitionedTable() throws Exception { startCleaner(); startCleaner(); - TimeUnit.SECONDS.sleep(2); + metricService.run(); verifyDeltaMetricsMatch( ImmutableMap.of( dbName + "." 
+ tblName + Path.SEPARATOR + part1Name, 2, @@ -336,6 +344,7 @@ public void testDeltaFileMetricMultiPartitionedTable() throws Exception { verifyDeltaMetricsMatch( ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS); + ms.dropTable(dbName, tblName); } @Test @@ -364,7 +373,7 @@ public void testDeltaFileMetricUnpartitionedTable() throws Exception { startInitiator(); - TimeUnit.SECONDS.sleep(2); + metricService.run(); // 2 active deltas // 1 small delta // 0 obsolete deltas @@ -380,7 +389,7 @@ public void testDeltaFileMetricUnpartitionedTable() throws Exception { startWorker(); - TimeUnit.SECONDS.sleep(2); + metricService.run(); // 0 active delta // 0 small delta // 2 obsolete delta @@ -396,7 +405,7 @@ public void testDeltaFileMetricUnpartitionedTable() throws Exception { startCleaner(); - TimeUnit.SECONDS.sleep(2); + metricService.run(); // 0 active delta // 0 small delta // 0 obsolete delta @@ -409,6 +418,7 @@ public void testDeltaFileMetricUnpartitionedTable() throws Exception { verifyDeltaMetricsMatch( ImmutableMap.of(), MetricsConstants.COMPACTION_NUM_OBSOLETE_DELTAS); + ms.dropTable(dbName, tblName); } private LockComponent createLockComponent(String dbName, String tblName, String partName) { diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestMRCompactorJobQueueConfiguration.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestMRCompactorJobQueueConfiguration.java index 8a709b5a168a..e775b4567155 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestMRCompactorJobQueueConfiguration.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/TestMRCompactorJobQueueConfiguration.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.utils.StringableMap; import org.apache.hadoop.mapred.JobConf; import org.junit.jupiter.params.ParameterizedTest; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/handler/TestAbortedTxnCleaner.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/handler/TestAbortedTxnCleaner.java index e0a2a1f5ed32..8f6814d4890f 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/handler/TestAbortedTxnCleaner.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/handler/TestAbortedTxnCleaner.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil; import org.apache.hadoop.hive.ql.io.AcidUtils; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFArrayPosition.java b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFArrayPosition.java new file mode 100644 index 000000000000..615f1f4b3c64 --- /dev/null +++ b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFArrayPosition.java @@ -0,0 +1,158 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.udf.generic; + +import org.apache.hadoop.hive.common.type.Date; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.serde2.io.DateWritableV2; +import org.apache.hadoop.hive.serde2.io.DoubleWritable; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.io.FloatWritable; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.Text; +import org.junit.Assert; +import org.junit.Test; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Arrays.asList; + +public class TestGenericUDFArrayPosition { + private final GenericUDFArrayPosition udf = new GenericUDFArrayPosition(); + + @Test public void testPrimitive() throws HiveException { + ObjectInspector[] inputOIs = { ObjectInspectorFactory.getStandardListObjectInspector( + PrimitiveObjectInspectorFactory.writableIntObjectInspector), + PrimitiveObjectInspectorFactory.writableIntObjectInspector }; + udf.initialize(inputOIs); + + Object i1 = new IntWritable(3); + Object i2 = new IntWritable(1); + Object i3 = new IntWritable(2); + Object i4 = new IntWritable(1); + Object i5 = new IntWritable(5); + + runAndVerify(asList(i1, i2, i3, i4), i2, 2); + runAndVerify(asList(i1, i2, i3, i4), i5, 0); + + ObjectInspector[] inputOIfs = { ObjectInspectorFactory.getStandardListObjectInspector( + PrimitiveObjectInspectorFactory.writableFloatObjectInspector), + PrimitiveObjectInspectorFactory.writableFloatObjectInspector }; + udf.initialize(inputOIfs); + + i1 = new FloatWritable(3.3f); + i2 = new FloatWritable(1.1f); + i3 = new FloatWritable(3.3f); + i4 = new FloatWritable(2.20f); + runAndVerify(asList(i1, i2, i3, i4), i1, 1); + runAndVerify(asList(i1, i2, i3, i4),null,null); //Test null element + } + + @Test public void testList() throws HiveException { + ObjectInspector[] inputOIs = { ObjectInspectorFactory.getStandardListObjectInspector( + ObjectInspectorFactory.getStandardListObjectInspector( + PrimitiveObjectInspectorFactory.writableStringObjectInspector)), + ObjectInspectorFactory.getStandardListObjectInspector( + PrimitiveObjectInspectorFactory.writableStringObjectInspector) }; + udf.initialize(inputOIs); + + Object i1 = asList(new Text("aa1"), new Text("dd"), new Text("cc"), new Text("bb")); + Object i2 = asList(new Text("aa2"), new Text("cc"), new Text("ba"), new Text("dd")); + Object i3 = asList(new Text("aa3"), new Text("cc"), new Text("dd"), new Text("ee"), new Text("bb")); + Object i4 = asList(new Text("aa4"), new Text("cc"), new Text("ddd"), new Text("bb")); + runAndVerify(asList(i1, i2, i2, i3, i4, i4), i2, 2); 
+ } + + @Test public void testStruct() throws HiveException { + ObjectInspector[] inputOIs = { ObjectInspectorFactory.getStandardListObjectInspector( + ObjectInspectorFactory.getStandardStructObjectInspector(asList("f1", "f2", "f3", "f4"), + asList(PrimitiveObjectInspectorFactory.writableStringObjectInspector, + PrimitiveObjectInspectorFactory.writableDoubleObjectInspector, + PrimitiveObjectInspectorFactory.writableDateObjectInspector, + ObjectInspectorFactory.getStandardListObjectInspector( + PrimitiveObjectInspectorFactory.writableIntObjectInspector)))), + ObjectInspectorFactory.getStandardStructObjectInspector(asList("f1", "f2", "f3", "f4"), + asList(PrimitiveObjectInspectorFactory.writableStringObjectInspector, + PrimitiveObjectInspectorFactory.writableDoubleObjectInspector, + PrimitiveObjectInspectorFactory.writableDateObjectInspector, + ObjectInspectorFactory.getStandardListObjectInspector( + PrimitiveObjectInspectorFactory.writableIntObjectInspector))) }; + udf.initialize(inputOIs); + + Object i1 = asList(new Text("a"), new DoubleWritable(3.1415), new DateWritableV2(Date.of(2015, 5, 26)), + asList(new IntWritable(1), new IntWritable(3), new IntWritable(2), new IntWritable(4))); + + Object i2 = asList(new Text("b"), new DoubleWritable(3.14), new DateWritableV2(Date.of(2015, 5, 26)), + asList(new IntWritable(1), new IntWritable(3), new IntWritable(2), new IntWritable(4))); + + Object i3 = asList(new Text("a"), new DoubleWritable(3.1415), new DateWritableV2(Date.of(2015, 5, 25)), + asList(new IntWritable(1), new IntWritable(3), new IntWritable(2), new IntWritable(5))); + + Object i4 = asList(new Text("a"), new DoubleWritable(3.1415), new DateWritableV2(Date.of(2015, 5, 25)), + asList(new IntWritable(1), new IntWritable(3), new IntWritable(2), new IntWritable(4))); + + runAndVerify(asList(i1, i3, i2, i3, i4, i2), i2, 3); + } + + @Test public void testMap() throws HiveException { + ObjectInspector[] inputOIs = { ObjectInspectorFactory.getStandardListObjectInspector( + ObjectInspectorFactory.getStandardMapObjectInspector( + PrimitiveObjectInspectorFactory.writableStringObjectInspector, + PrimitiveObjectInspectorFactory.writableIntObjectInspector)), + ObjectInspectorFactory.getStandardMapObjectInspector( + PrimitiveObjectInspectorFactory.writableStringObjectInspector, + PrimitiveObjectInspectorFactory.writableIntObjectInspector) }; + udf.initialize(inputOIs); + + Map m1 = new HashMap<>(); + m1.put(new Text("a"), new IntWritable(4)); + m1.put(new Text("b"), new IntWritable(3)); + m1.put(new Text("c"), new IntWritable(1)); + m1.put(new Text("d"), new IntWritable(2)); + + Map m2 = new HashMap<>(); + m2.put(new Text("d"), new IntWritable(4)); + m2.put(new Text("b"), new IntWritable(3)); + m2.put(new Text("a"), new IntWritable(1)); + m2.put(new Text("c"), new IntWritable(2)); + + Map m3 = new HashMap<>(); + m3.put(new Text("d"), new IntWritable(4)); + m3.put(new Text("b"), new IntWritable(3)); + m3.put(new Text("a"), new IntWritable(1)); + + runAndVerify(asList(m1, m3, m2, m3, m1), m1, 1); + } + + private void runAndVerify(List actual, Object element, Object expected) + throws HiveException { + GenericUDF.DeferredJavaObject[] args = { new GenericUDF.DeferredJavaObject(actual), new GenericUDF.DeferredJavaObject(element) }; + Object result = udf.evaluate(args); + if(expected == null){ + Assert.assertNull(result); + } + else { + Assert.assertEquals("index value", expected, ((IntWritable)result).get()); + } + } +} diff --git 
a/ql/src/test/org/apache/hadoop/hive/ql/util/TestHiveStrictManagedMigration.java b/ql/src/test/org/apache/hadoop/hive/ql/util/TestHiveStrictManagedMigration.java index 386a90fd9445..a10a6aa30f7a 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/util/TestHiveStrictManagedMigration.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/util/TestHiveStrictManagedMigration.java @@ -84,7 +84,7 @@ public void testUpgrade() throws Exception { "--oldWarehouseRoot", oldWarehouse}; HiveConf newConf = new HiveConf(hiveConf); File newWarehouseDir = new File(getTestDataDir(), "newWarehouse"); - newConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, newWarehouseDir.getAbsolutePath()); + newConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, newWarehouseDir.getAbsolutePath()); newConf.set("strict.managed.tables.migration.owner", System.getProperty("user.name")); runMigrationTool(newConf, args); @@ -124,7 +124,7 @@ public void testExternalMove() throws Exception { HiveConf newConf = new HiveConf(hiveConf); File newManagedWarehouseDir = new File(getTestDataDir(), "newManaged"); File newExtWarehouseDir = new File(getTestDataDir(), "newExternal"); - newConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, newManagedWarehouseDir.getAbsolutePath()); + newConf.set(HiveConf.ConfVars.METASTORE_WAREHOUSE.varname, newManagedWarehouseDir.getAbsolutePath()); newConf.set(HiveConf.ConfVars.HIVE_METASTORE_WAREHOUSE_EXTERNAL.varname, newExtWarehouseDir.getAbsolutePath()); runMigrationTool(newConf, args); Assert.assertTrue(newExtWarehouseDir.exists()); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/util/TestUpgradeTool.java b/ql/src/test/org/apache/hadoop/hive/ql/util/TestUpgradeTool.java index 7974da839123..4f24454056b7 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/util/TestUpgradeTool.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/util/TestUpgradeTool.java @@ -47,7 +47,7 @@ protected String getTestDataDir() { public void testPostUpgrade() throws Exception { int[][] data = {{1, 2}, {3, 4}, {5, 6}}; int[][] dataPart = {{1, 2, 10}, {3, 4, 11}, {5, 6, 12}}; - hiveConf.setVar(HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "dynamic"); + hiveConf.setVar(HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "dynamic"); runStatementOnDriver("drop table if exists TAcid"); runStatementOnDriver("drop table if exists TAcidPart"); runStatementOnDriver("drop table if exists TFlat"); diff --git a/ql/src/test/queries/clientnegative/avro_duplicate_field_name.q b/ql/src/test/queries/clientnegative/avro_duplicate_field_name.q new file mode 100644 index 000000000000..d10af6c15068 --- /dev/null +++ b/ql/src/test/queries/clientnegative/avro_duplicate_field_name.q @@ -0,0 +1,25 @@ +-- verify AVRO-3827: Disallow duplicate field names + +CREATE TABLE avroExternalDupField +STORED AS AVRO +TBLPROPERTIES ('avro.schema.literal'='{ + "namespace": "org.apache.hive", + "name": "my_schema", + "type": "record", + "fields": [ + { + "name": "f1", + "type": { + "name": "a", + "type": "record", + "fields": [] + } + }, { + "name": "f1", + "type": { + "name": "b", + "type": "record", + "fields": [] + } + } + ] }'); diff --git a/ql/src/test/queries/clientnegative/avro_invalid_field_name.q b/ql/src/test/queries/clientnegative/avro_invalid_field_name.q new file mode 100644 index 000000000000..8ce42bc3dbf0 --- /dev/null +++ b/ql/src/test/queries/clientnegative/avro_invalid_field_name.q @@ -0,0 +1,18 @@ +-- verify AVRO-3820:Don't allow invalid field names, field name should match [A-Za-z_][A-Za-z0-9_]* + +CREATE TABLE avroExternalInvalidField +STORED AS AVRO 
+TBLPROPERTIES ('avro.schema.literal'='{ + "namespace": "org.apache.hive", + "name": "my_record", + "type": "record", + "fields": [ + { + "name": "f1.x", + "type": { + "name": "my_enum", + "type": "enum", + "symbols": ["a"] + } + } + ] }'); diff --git a/ql/src/test/queries/clientnegative/compute_stats_long.q b/ql/src/test/queries/clientnegative/compute_stats_long.q deleted file mode 100644 index bf87393abd0f..000000000000 --- a/ql/src/test/queries/clientnegative/compute_stats_long.q +++ /dev/null @@ -1,7 +0,0 @@ -create table tab_int(a int); - --- insert some data -LOAD DATA LOCAL INPATH "../../data/files/int.txt" INTO TABLE tab_int; - --- this should raise an error since the number of bit vectors has a hard limit at 1024 -select compute_stats(a, 'fm', 10000) from tab_int; diff --git a/ql/src/test/queries/clientnegative/msck_repair_7.q b/ql/src/test/queries/clientnegative/msck_repair_7.q new file mode 100644 index 000000000000..2a7b18b0719b --- /dev/null +++ b/ql/src/test/queries/clientnegative/msck_repair_7.q @@ -0,0 +1,7 @@ +DROP TABLE IF EXISTS repairtable; + +CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING); + +dfs ${system:test.dfs.mkdir} ${hiveconf:hive.metastore.warehouse.dir}/repairtable/p1a; + +MSCK REPAIR TABLE default.repairtable; \ No newline at end of file diff --git a/ql/src/test/queries/clientnegative/msck_repair_8.q b/ql/src/test/queries/clientnegative/msck_repair_8.q new file mode 100644 index 000000000000..d6aee8a7b537 --- /dev/null +++ b/ql/src/test/queries/clientnegative/msck_repair_8.q @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS repairtable; + +CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING); + +MSCK REPAIR TABLE default.repairtable.p1; \ No newline at end of file diff --git a/ql/src/test/queries/clientnegative/udf_enforce_constraint_wrong_type1.q b/ql/src/test/queries/clientnegative/udf_enforce_constraint_wrong_type1.q new file mode 100644 index 000000000000..51d6271d57ff --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_enforce_constraint_wrong_type1.q @@ -0,0 +1 @@ +SELECT ENFORCE_CONSTRAINT(1); diff --git a/ql/src/test/queries/clientnegative/udf_exception_in_vertex_udf_wrong_type1.q b/ql/src/test/queries/clientnegative/udf_exception_in_vertex_udf_wrong_type1.q new file mode 100644 index 000000000000..5fe8a9576421 --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_exception_in_vertex_udf_wrong_type1.q @@ -0,0 +1,2 @@ +--! qt:dataset:src +SELECT EXCEPTION_IN_VERTEX_UDF(value, 1) FROM src diff --git a/ql/src/test/queries/clientnegative/udf_exception_in_vertex_udf_wrong_type2.q b/ql/src/test/queries/clientnegative/udf_exception_in_vertex_udf_wrong_type2.q new file mode 100644 index 000000000000..5fe8a9576421 --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_exception_in_vertex_udf_wrong_type2.q @@ -0,0 +1,2 @@ +--! qt:dataset:src +SELECT EXCEPTION_IN_VERTEX_UDF(value, 1) FROM src diff --git a/ql/src/test/queries/clientnegative/udf_exception_in_vertex_udf_wrong_type3.q b/ql/src/test/queries/clientnegative/udf_exception_in_vertex_udf_wrong_type3.q new file mode 100644 index 000000000000..0cb863d42019 --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_exception_in_vertex_udf_wrong_type3.q @@ -0,0 +1,2 @@ +--! 
qt:dataset:src +SELECT EXCEPTION_IN_VERTEX_UDF(value, 'Map 1', 99.9) FROM src diff --git a/ql/src/test/queries/clientnegative/udf_exception_in_vertex_udf_wrong_type4.q b/ql/src/test/queries/clientnegative/udf_exception_in_vertex_udf_wrong_type4.q new file mode 100644 index 000000000000..0a112ad44036 --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_exception_in_vertex_udf_wrong_type4.q @@ -0,0 +1,2 @@ +--! qt:dataset:src +SELECT EXCEPTION_IN_VERTEX_UDF(value, 'Map 1', 1, 99.9) FROM src diff --git a/ql/src/test/queries/clientnegative/udf_greatest_error_3.q b/ql/src/test/queries/clientnegative/udf_greatest_error_3.q new file mode 100644 index 000000000000..29860dc2f0af --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_greatest_error_3.q @@ -0,0 +1,2 @@ +--! qt:dataset:src +SELECT GREATEST('2.0', array('a', 'b')) FROM src LIMIT 1; diff --git a/ql/src/test/queries/clientnegative/udf_grouping_wrong_type1.q b/ql/src/test/queries/clientnegative/udf_grouping_wrong_type1.q new file mode 100644 index 000000000000..d985bc36c0e1 --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_grouping_wrong_type1.q @@ -0,0 +1 @@ +SELECT GROUPING(1, ARRAY(2)); diff --git a/ql/src/test/queries/clientnegative/udf_isfalse_wrong_type1.q b/ql/src/test/queries/clientnegative/udf_isfalse_wrong_type1.q new file mode 100644 index 000000000000..b15a05ded872 --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_isfalse_wrong_type1.q @@ -0,0 +1 @@ +SELECT ISFALSE(ARRAY(1)) diff --git a/ql/src/test/queries/clientnegative/udf_isnotfalse_wrong_type1.q b/ql/src/test/queries/clientnegative/udf_isnotfalse_wrong_type1.q new file mode 100644 index 000000000000..eb3de6996553 --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_isnotfalse_wrong_type1.q @@ -0,0 +1 @@ +SELECT ISNOTFALSE(ARRAY(1)); diff --git a/ql/src/test/queries/clientnegative/udf_isnottrue_wrong_type1.q b/ql/src/test/queries/clientnegative/udf_isnottrue_wrong_type1.q new file mode 100644 index 000000000000..aa36ac24721c --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_isnottrue_wrong_type1.q @@ -0,0 +1 @@ +SELECT ISNOTTRUE(ARRAY(1)); diff --git a/ql/src/test/queries/clientnegative/udf_istrue_wrong_type1.q b/ql/src/test/queries/clientnegative/udf_istrue_wrong_type1.q new file mode 100644 index 000000000000..be6742ad1247 --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_istrue_wrong_type1.q @@ -0,0 +1 @@ +SELECT ISTRUE(ARRAY(1)); diff --git a/ql/src/test/queries/clientnegative/udf_split_wrong_type1.q b/ql/src/test/queries/clientnegative/udf_split_wrong_type1.q new file mode 100644 index 000000000000..f9cd7c5ac3ba --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_split_wrong_type1.q @@ -0,0 +1 @@ +SELECT SPLIT(ARRAY('1,2,3'), ','); diff --git a/ql/src/test/queries/clientnegative/udf_split_wrong_type2.q b/ql/src/test/queries/clientnegative/udf_split_wrong_type2.q new file mode 100644 index 000000000000..2ce53c86b9a7 --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_split_wrong_type2.q @@ -0,0 +1 @@ +SELECT SPLIT('1,2,3', ARRAY(',', ':')); diff --git a/ql/src/test/queries/clientnegative/udf_sq_count_check_wrong_type1.q b/ql/src/test/queries/clientnegative/udf_sq_count_check_wrong_type1.q new file mode 100644 index 000000000000..261f44b92dd3 --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_sq_count_check_wrong_type1.q @@ -0,0 +1 @@ +SELECT SQ_COUNT_CHECK(ARRAY(1)); diff --git a/ql/src/test/queries/clientnegative/udf_to_epoch_milli_wrong_type1.q 
b/ql/src/test/queries/clientnegative/udf_to_epoch_milli_wrong_type1.q new file mode 100644 index 000000000000..4438c00eae4d --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_to_epoch_milli_wrong_type1.q @@ -0,0 +1,2 @@ +--! qt:dataset:src +SELECT TO_EPOCH_MILLI(1) FROM src diff --git a/ql/src/test/queries/clientnegative/udf_trunc_wrong_type1.q b/ql/src/test/queries/clientnegative/udf_trunc_wrong_type1.q new file mode 100644 index 000000000000..9ef1294366f2 --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_trunc_wrong_type1.q @@ -0,0 +1 @@ +SELECT TRUNC(ARRAY('2023-12-04'), 'MM'); diff --git a/ql/src/test/queries/clientnegative/udf_trunc_wrong_type2.q b/ql/src/test/queries/clientnegative/udf_trunc_wrong_type2.q new file mode 100644 index 000000000000..8746bf4c9116 --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_trunc_wrong_type2.q @@ -0,0 +1 @@ +SELECT TRUNC('2023-12-04', ARRAY('MM')); diff --git a/ql/src/test/queries/clientnegative/udf_tumbling_window_check_wrong_type1.q b/ql/src/test/queries/clientnegative/udf_tumbling_window_check_wrong_type1.q new file mode 100644 index 000000000000..afede1661592 --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_tumbling_window_check_wrong_type1.q @@ -0,0 +1 @@ +SELECT TUMBLING_WINDOW(ARRAY('2020-03-01 06:03:00'), interval '5' MINUTES); diff --git a/ql/src/test/queries/clientnegative/udf_tumbling_window_check_wrong_type2.q b/ql/src/test/queries/clientnegative/udf_tumbling_window_check_wrong_type2.q new file mode 100644 index 000000000000..24837bf5c7bc --- /dev/null +++ b/ql/src/test/queries/clientnegative/udf_tumbling_window_check_wrong_type2.q @@ -0,0 +1 @@ +SELECT TUMBLING_WINDOW(CAST('2020-03-01 06:03:00' as timestamp), ARRAY(interval '5' MINUTES)); diff --git a/ql/src/test/queries/clientnegative/udtf_stack_not_constant.q b/ql/src/test/queries/clientnegative/udtf_stack_not_constant.q new file mode 100644 index 000000000000..f559ce304621 --- /dev/null +++ b/ql/src/test/queries/clientnegative/udtf_stack_not_constant.q @@ -0,0 +1,2 @@ +--! 
qt:dataset:alltypesparquet +SELECT STACK(cint, 'a', 'b') FROM alltypesparquet; diff --git a/ql/src/test/queries/clientnegative/udtf_stack_null.q b/ql/src/test/queries/clientnegative/udtf_stack_null.q new file mode 100644 index 000000000000..f3832ffd4adb --- /dev/null +++ b/ql/src/test/queries/clientnegative/udtf_stack_null.q @@ -0,0 +1 @@ +SELECT stack(cast(null as int), 'a', 'b', 'c', 'd'); diff --git a/ql/src/test/queries/clientnegative/udtf_stack_wrong_type1.q b/ql/src/test/queries/clientnegative/udtf_stack_wrong_type1.q new file mode 100644 index 000000000000..90762becb234 --- /dev/null +++ b/ql/src/test/queries/clientnegative/udtf_stack_wrong_type1.q @@ -0,0 +1 @@ +SELECT stack('2', 'a', 'b', 'c', 'd'); diff --git a/ql/src/test/queries/clientpositive/bucket_decimal_col_select.q b/ql/src/test/queries/clientpositive/bucket_decimal_col_select.q new file mode 100644 index 000000000000..33920c791fa6 --- /dev/null +++ b/ql/src/test/queries/clientpositive/bucket_decimal_col_select.q @@ -0,0 +1,6 @@ +set hive.tez.bucket.pruning=true; + +create table bucket_table(id decimal(38,0), name string) clustered by(id) into 3 buckets; +insert into bucket_table values(5000000000000999640711, 'Cloud'); + +select * from bucket_table bt where id = 5000000000000999640711; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/cast_null_to_complex.q b/ql/src/test/queries/clientpositive/cast_null_to_complex.q new file mode 100644 index 000000000000..323e4bda0fc3 --- /dev/null +++ b/ql/src/test/queries/clientpositive/cast_null_to_complex.q @@ -0,0 +1,13 @@ +SET hive.cli.print.header=true; + +explain cbo +select cast(null as map), cast(null as array>), cast(null as int), cast(null as struct>, f2:struct>); +explain +select cast(null as map), cast(null as array>), cast(null as int), cast(null as struct>, f2:struct>); +select cast(null as map), cast(null as array>), cast(null as int), cast(null as struct>, f2:struct>); + + +create table t1 as +select cast(null as map), cast(null as array>), cast(null as int), cast(null as struct>, f2:struct>); + +describe formatted t1; diff --git a/ql/src/test/queries/clientpositive/cbo_constantfolding.q b/ql/src/test/queries/clientpositive/cbo_constantfolding.q new file mode 100644 index 000000000000..25f6f1345b78 --- /dev/null +++ b/ql/src/test/queries/clientpositive/cbo_constantfolding.q @@ -0,0 +1,5 @@ +EXPLAIN CBO SELECT DATE_ADD('2023-01-01', 1); +EXPLAIN CBO SELECT DATE_SUB('2023-01-01', 1); +EXPLAIN CBO SELECT FROM_UNIXTIME(1672560000); +EXPLAIN CBO SELECT TO_UNIX_TIMESTAMP(DATE '2023-01-01'); +EXPLAIN CBO SELECT UNIX_TIMESTAMP(DATE '2023-01-01'); diff --git a/ql/src/test/queries/clientpositive/char_udf1.q b/ql/src/test/queries/clientpositive/char_udf1.q index 204404361502..4a6ac4020c36 100644 --- a/ql/src/test/queries/clientpositive/char_udf1.q +++ b/ql/src/test/queries/clientpositive/char_udf1.q @@ -1,4 +1,5 @@ --! qt:dataset:src +--! 
qt:dataset:temp_udf drop table char_udf_1; create table char_udf_1 (c1 string, c2 string, c3 char(10), c4 char(20)); diff --git a/ql/src/test/queries/clientpositive/complex_datatype.q b/ql/src/test/queries/clientpositive/complex_datatype.q new file mode 100644 index 000000000000..49cc35f6a1be --- /dev/null +++ b/ql/src/test/queries/clientpositive/complex_datatype.q @@ -0,0 +1,232 @@ +set hive.default.fileformat=TEXTFILE; +set hive.fetch.task.conversion=none; +set hive.llap.io.enabled=false; +set hive.vectorized.execution.enabled=true; + +create EXTERNAL table `complex_map_array_table` as +select +'bob' as name, + MAP( + "Key1", + ARRAY( + 1, + 2, + 3 + ), + "Key2", + ARRAY( + 4, + 5, + 6 + ) + ) as column2; + +create EXTERNAL table `complex_map_struct_table` as +select +'bob' as name, +MAP( + "Map_Key1", + named_struct( + 'Id', + 'Id_Value1', + 'Name', + 'Name_Value1' + ), + "Map_Key2", + named_struct( + 'Id', + 'Id_Value2', + 'Name', + 'Name_Value2' + ) + ) as column2; + + +create EXTERNAL table `complex_table1` as +select +MAP( + "Key1", + ARRAY( + 1, + 2, + 3 + ), + "Key2", + ARRAY( + 4, + 5, + 6 + ) + ) as column1, +'bob' as name, +MAP( + "Map_Key1", + named_struct( + 'Id', + 'Id_Value1', + 'Name', + 'Name_Value1' + ), + "Map_Key2", + named_struct( + 'Id', + 'Id_Value2', + 'Name', + 'Name_Value2' + ) + ) as column3; + +create EXTERNAL table `complex_table2` as +select +MAP( + "Key1", + ARRAY( + 1, + 2, + 3 + ), + "Key2", + ARRAY( + 4, + 5, + 6 + ) + ) as column1, +MAP( + "Map_Key1", + named_struct( + 'Id', + 'Id_Value1', + 'Name', + 'Name_Value1' + ), + "Map_Key2", + named_struct( + 'Id', + 'Id_Value2', + 'Name', + 'Name_Value2' + ) + ) as column2; + +create EXTERNAL table `complex_table3` as +select +MAP( + "Key1", + ARRAY( + 1, + 2, + 3 + ), + "Key2", + ARRAY( + 4, + 5, + 6 + ) + ) as column1, +MAP( + "Key3", + ARRAY( + 7, + 8, + 9 + ), + "Key4", + ARRAY( + 10, + 11, + 12 + ) + ) as column2; + +-- The below scenario's was working before fix +create EXTERNAL table `complex_array_map_table` as +select +'bob' as name, +ARRAY( + MAP( + "Key1", + "Value1" + ), + MAP( + "Key2", + "Value2" + ) + ) as column2; + +create EXTERNAL table `complex_map_map_table` as +select + 'bob' as name, + MAP( + "Key1", + MAP( + 1, + 2 + ), + "Key2", + MAP( + 3, + 4 + ) + ) as column2; + +create EXTERNAL table `complex_combined_table` as +select + ARRAY('arr_val1', 'arr_val2', 'arr_val3') as column1, + 'bob' as column2, + MAP( + "Key1", + ARRAY( + 1, + 2, + 3 + ), + "Key2", + ARRAY( + 4, + 5, + 6 + ) + ) as column3, + NAMED_STRUCT('abc', '7', 'def', '8') as column4, + MAP( + "Key3", + "Value3", + "Key4", + "Value4" + ) as column5; + +-- with vectorization set as "true" +select * from complex_map_array_table; +select * from complex_map_struct_table; +select * from complex_table1; +select * from complex_table2; +select * from complex_table3; +select * from complex_array_map_table; +select * from complex_map_map_table; +select * from complex_combined_table; + +-- with fetch task conversion set as "more" +set hive.fetch.task.conversion=more; + +select * from complex_map_array_table; +select * from complex_map_struct_table; +select * from complex_table1; +select * from complex_table2; +select * from complex_table3; +select * from complex_array_map_table; +select * from complex_map_map_table; +select * from complex_combined_table; + +-- with vectorization set as "false" +set hive.vectorized.execution.enabled=false; + +select * from complex_map_array_table; +select * from complex_map_struct_table; +select * from 
complex_table1; +select * from complex_table2; +select * from complex_table3; +select * from complex_array_map_table; +select * from complex_map_map_table; +select * from complex_combined_table; diff --git a/ql/src/test/queries/clientpositive/compute_stats_binary.q b/ql/src/test/queries/clientpositive/compute_stats_binary.q deleted file mode 100644 index fd15634f202f..000000000000 --- a/ql/src/test/queries/clientpositive/compute_stats_binary.q +++ /dev/null @@ -1,9 +0,0 @@ -create table tab_binary(a binary); - --- insert some data -LOAD DATA LOCAL INPATH "../../data/files/binary.txt" INTO TABLE tab_binary; - -select count(*) from tab_binary; - --- compute statistical summary of data -select compute_stats(a, 16) from tab_binary; diff --git a/ql/src/test/queries/clientpositive/compute_stats_boolean.q b/ql/src/test/queries/clientpositive/compute_stats_boolean.q deleted file mode 100644 index cddb53f8f64f..000000000000 --- a/ql/src/test/queries/clientpositive/compute_stats_boolean.q +++ /dev/null @@ -1,9 +0,0 @@ -create table tab_bool(a boolean); - --- insert some data -LOAD DATA LOCAL INPATH "../../data/files/bool.txt" INTO TABLE tab_bool; - -select count(*) from tab_bool; - --- compute statistical summary of data -select compute_stats(a, 16) from tab_bool; diff --git a/ql/src/test/queries/clientpositive/compute_stats_date.q b/ql/src/test/queries/clientpositive/compute_stats_date.q deleted file mode 100644 index bf478526ba8a..000000000000 --- a/ql/src/test/queries/clientpositive/compute_stats_date.q +++ /dev/null @@ -1,28 +0,0 @@ - -create table tab_date ( - origin_city_name string, - dest_city_name string, - fl_date date, - arr_delay float, - fl_num int -); - --- insert some data -load data local inpath '../../data/files/flights_join.txt' overwrite into table tab_date; - -select count(*) from tab_date; - --- compute statistical summary of data -select compute_stats(fl_date, 'hll') from tab_date; - -explain -analyze table tab_date compute statistics for columns fl_date; - -analyze table tab_date compute statistics for columns fl_date; - -describe formatted tab_date fl_date; - --- Update stats manually. 
Try both yyyy-mm-dd and integer value for high/low value -alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0'); - -describe formatted tab_date fl_date; diff --git a/ql/src/test/queries/clientpositive/compute_stats_decimal.q b/ql/src/test/queries/clientpositive/compute_stats_decimal.q deleted file mode 100644 index 2beafaf219a1..000000000000 --- a/ql/src/test/queries/clientpositive/compute_stats_decimal.q +++ /dev/null @@ -1,11 +0,0 @@ -set hive.stats.autogather=true; - -create table tab_decimal(a decimal(35,3)); - --- insert some data -LOAD DATA LOCAL INPATH "../../data/files/decimal.txt" INTO TABLE tab_decimal; - -select count(*) from tab_decimal; - --- compute statistical summary of data -select compute_stats(a, 'fm', 18) from tab_decimal; diff --git a/ql/src/test/queries/clientpositive/compute_stats_double.q b/ql/src/test/queries/clientpositive/compute_stats_double.q deleted file mode 100644 index 6bae0643a897..000000000000 --- a/ql/src/test/queries/clientpositive/compute_stats_double.q +++ /dev/null @@ -1,9 +0,0 @@ -create table tab_double(a double); - --- insert some data -LOAD DATA LOCAL INPATH "../../data/files/double.txt" INTO TABLE tab_double; - -select count(*) from tab_double; - --- compute statistical summary of data -select compute_stats(a, 'fm', 16) from tab_double; diff --git a/ql/src/test/queries/clientpositive/compute_stats_empty_table.q b/ql/src/test/queries/clientpositive/compute_stats_empty_table.q deleted file mode 100644 index d4ed93fa6398..000000000000 --- a/ql/src/test/queries/clientpositive/compute_stats_empty_table.q +++ /dev/null @@ -1,12 +0,0 @@ -create table tab_empty(a boolean, b int, c double, d string, e binary); - -select count(*) from tab_empty; - --- compute statistical summary of data -select compute_stats(a, 16) from tab_empty; -select compute_stats(b, 16) from tab_empty; -select compute_stats(c, 16) from tab_empty; -select compute_stats(d, 16) from tab_empty; -select compute_stats(e, 16) from tab_empty; - - diff --git a/ql/src/test/queries/clientpositive/compute_stats_long.q b/ql/src/test/queries/clientpositive/compute_stats_long.q deleted file mode 100644 index 48f4ebb9791d..000000000000 --- a/ql/src/test/queries/clientpositive/compute_stats_long.q +++ /dev/null @@ -1,9 +0,0 @@ -create table tab_int(a int); - --- insert some data -LOAD DATA LOCAL INPATH "../../data/files/int.txt" INTO TABLE tab_int; - -select count(*) from tab_int; - --- compute statistical summary of data -select compute_stats(a, 'fm', 16) from tab_int; diff --git a/ql/src/test/queries/clientpositive/compute_stats_string.q b/ql/src/test/queries/clientpositive/compute_stats_string.q deleted file mode 100644 index 79a531e8ec4d..000000000000 --- a/ql/src/test/queries/clientpositive/compute_stats_string.q +++ /dev/null @@ -1,9 +0,0 @@ -create table tab_string(a string); - --- insert some data -LOAD DATA LOCAL INPATH "../../data/files/string.txt" INTO TABLE tab_string; - -select count(*) from tab_string; - --- compute statistical summary of data -select compute_stats(a, 'fm', 16) from tab_string; diff --git a/ql/src/test/queries/clientpositive/cte_mat_11.q b/ql/src/test/queries/clientpositive/cte_mat_11.q new file mode 100644 index 000000000000..f1ace684b45a --- /dev/null +++ b/ql/src/test/queries/clientpositive/cte_mat_11.q @@ -0,0 +1,51 @@ +--! 
qt:dataset:src +set hive.optimize.cte.materialize.threshold=2; +set hive.optimize.cte.materialize.full.aggregate.only=false; + +EXPLAIN WITH materialized_cte1 AS ( + SELECT * FROM src +), +materialized_cte2 AS ( + SELECT a.key + FROM materialized_cte1 a + JOIN materialized_cte1 b ON (a.key = b.key) +) +SELECT a.key +FROM materialized_cte2 a +JOIN materialized_cte2 b ON (a.key = b.key); + +EXPLAIN CBO WITH materialized_cte1 AS ( + SELECT * FROM src +), +materialized_cte2 AS ( + SELECT a.key + FROM materialized_cte1 a + JOIN materialized_cte1 b ON (a.key = b.key) +) +SELECT a.key +FROM materialized_cte2 a +JOIN materialized_cte2 b ON (a.key = b.key); + +EXPLAIN WITH materialized_cte1 AS ( + SELECT * FROM src +), +materialized_cte2 AS ( + SELECT * FROM materialized_cte1 + UNION ALL + SELECT * FROM materialized_cte1 +) +SELECT * FROM materialized_cte2 +UNION ALL +SELECT * FROM materialized_cte2; + +EXPLAIN CBO WITH materialized_cte1 AS ( + SELECT * FROM src +), +materialized_cte2 AS ( + SELECT * FROM materialized_cte1 + UNION ALL + SELECT * FROM materialized_cte1 +) +SELECT * FROM materialized_cte2 +UNION ALL +SELECT * FROM materialized_cte2; diff --git a/ql/src/test/queries/clientpositive/empty_result_ctas.q b/ql/src/test/queries/clientpositive/empty_result_ctas.q index 0a1fc91c11aa..9437e0c1e6ab 100644 --- a/ql/src/test/queries/clientpositive/empty_result_ctas.q +++ b/ql/src/test/queries/clientpositive/empty_result_ctas.q @@ -3,3 +3,15 @@ SET hive.cli.print.header=true; CREATE TABLE T1 (c_primitive int, c_array array, c_nested array, f3:array>>); CREATE TABLE T2 AS SELECT * FROM T1 LIMIT 0; DESCRIBE FORMATTED t2; + +-- empty source table +CREATE TABLE T3 AS SELECT * FROM T1; +DESCRIBE FORMATTED t3; + +create table table1 (a string, b string); +create table table2 (complex_column array, `values`:array>>>>); + +-- empty result subquery +create table table3 as with t1 as (select * from table1), t2 as (select * from table2 where 1=0) select t1.*, t2.* from t1 left join t2; + +describe formatted table3; diff --git a/ql/src/test/queries/clientpositive/groupby_sort_2_23.q b/ql/src/test/queries/clientpositive/groupby_sort_2_23.q new file mode 100644 index 000000000000..b241bee68550 --- /dev/null +++ b/ql/src/test/queries/clientpositive/groupby_sort_2_23.q @@ -0,0 +1,10 @@ +set hive.mapred.mode=nonstrict; +set hive.map.aggr=true; +set hive.explain.user=false; + +create table test_bucket(age int, name string, dept string) clustered by (age, name) sorted by (age asc, name asc) into 2 buckets stored as ORC; +insert into test_bucket values (1, 'user1', 'dept1'), ( 2, 'user2' , 'dept2'); +insert into test_bucket values (1, 'user1', 'dept1'), ( 2, 'user2' , 'dept2'); + +explain vectorization detail select age, name, count(*) from test_bucket group by age, name having count(*) > 1; +select age, name, count(*) from test_bucket group by age, name having count(*) > 1; diff --git a/ql/src/test/queries/clientpositive/insert_and_load_overwrite_drop_partition.q b/ql/src/test/queries/clientpositive/insert_and_load_overwrite_drop_partition.q new file mode 100644 index 000000000000..fabf49022804 --- /dev/null +++ b/ql/src/test/queries/clientpositive/insert_and_load_overwrite_drop_partition.q @@ -0,0 +1,61 @@ +CREATE EXTERNAL TABLE `table1`( + `name` string, + `number` string) +PARTITIONED BY ( + `part_col` string); + +CREATE EXTERNAL TABLE `table2`( + `name` string, + `number` string) +PARTITIONED BY ( + `part_col` string); + +insert into table table1 values ('a', '10', 'part1'); +insert into table table1 values 
('b', '11', 'part1'); +insert into table table1 values ('a2', '2', 'part2'); + +insert into table table2 values ('x', '100', 'part1'); +insert into table table2 values ('y', '101', 'part1'); +insert into table table2 values ('z', '102', 'part1'); +insert into table table2 values ('x2', '200', 'part2'); +insert into table table2 values ('y2', '201', 'part2'); +insert into table table2 values ('x3', '300', 'part3'); + +--non empty input case +alter table table2 drop partition(part_col='part1'); + +select count(*) from table2 where part_col='part1'; + +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/table2/part_col=part1; + +insert overwrite table table2 partition(part_col='part1') select name, number from table1 where part_col='part1'; + +select count(*) from table2 where part_col='part1'; + +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/table2/part_col=part1; + +--empty input case +alter table table2 drop partition(part_col='part2'); + +select count(*) from table2 where part_col='part2'; + +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/table2/part_col=part2; + +insert overwrite table table2 partition(part_col='part2') select name, number from table1 where part_col='dummy_part'; + +select count(*) from table2 where part_col='part2'; + +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/table2/part_col=part2; + +--load overwrite partition +alter table table2 drop partition(part_col='part3'); + +select count(*) from table2 where part_col='part3'; + +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/table2/part_col=part3; + +LOAD DATA LOCAL INPATH '../../data/files/kv5.txt' OVERWRITE INTO TABLE table2 PARTITION(part_col='part3'); + +select count(*) from table2 where part_col='part3'; + +dfs -ls ${hiveconf:hive.metastore.warehouse.dir}/table2/part_col=part3; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/like_control_characters.q b/ql/src/test/queries/clientpositive/like_control_characters.q new file mode 100644 index 000000000000..5f9772ed2ef9 --- /dev/null +++ b/ql/src/test/queries/clientpositive/like_control_characters.q @@ -0,0 +1,13 @@ +set hive.mapred.mode=nonstrict; +set hive.explain.user=false; +set hive.vectorized.execution.enabled=true; + +create temporary table foo (col string); + +-- SORT_QUERY_RESULTS + +LOAD DATA LOCAL INPATH '../../data/files/control_characters.txt' INTO TABLE foo; + +explain select col, count(*) from foo where col like '%fg%' group by col; +select col, count(*) from foo where col like '%fg%' group by col; + diff --git a/ql/src/test/queries/clientpositive/mapjoin_date.q b/ql/src/test/queries/clientpositive/mapjoin_date.q new file mode 100644 index 000000000000..8126c017a90c --- /dev/null +++ b/ql/src/test/queries/clientpositive/mapjoin_date.q @@ -0,0 +1,8 @@ +set hive.auto.convert.join=true; + +CREATE TABLE person (fname string, birthDate date); +INSERT INTO person VALUES ('Victor', '2023-11-27'), ('Alexandre', '2023-11-28'); + +EXPLAIN VECTORIZATION DETAIL SELECT * FROM person p1 INNER JOIN person p2 ON p1.birthDate=p2.birthDate; + +SELECT * FROM person p1 INNER JOIN person p2 ON p1.birthDate=p2.birthDate; diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_12.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_12.q new file mode 100644 index 000000000000..5a159b7e4b88 --- /dev/null +++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_12.q @@ -0,0 +1,36 @@ +-- Test rebuild of materialized view without aggregate when source tables have delete operations since 
last rebuild. +-- Incremental rebuild is not available. + +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; + +create table t1 (a int, b int) stored as orc TBLPROPERTIES ('transactional'='true'); + +insert into t1 values +(3, 3), +(2, 1), +(2, 2), +(1, 2), +(1, 1); + +CREATE MATERIALIZED VIEW mat1 + TBLPROPERTIES ('transactional'='true') AS +SELECT a +FROM t1 +WHERE b < 10; + +delete from t1 where b = 2; + +explain +alter materialized view mat1 rebuild; + +alter materialized view mat1 rebuild; + +explain cbo +SELECT a +FROM t1 +WHERE b < 10; + +SELECT a +FROM t1 +WHERE b < 10; diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q index 5a540250838e..cdaf1abd091b 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q +++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_5.q @@ -11,6 +11,8 @@ set hive.materializedview.rewriting=true; create table cmv_basetable_n6 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); insert into cmv_basetable_n6 values + (1, 'alfred', 10.30, 2), + (1, 'alfred', 10.30, 2), (1, 'alfred', 10.30, 2), (2, 'bob', 3.14, 3), (2, 'bonnie', 172342.2, 3), @@ -20,6 +22,7 @@ insert into cmv_basetable_n6 values create table cmv_basetable_2_n3 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); insert into cmv_basetable_2_n3 values + (1, 'alfred', 10.30, 2), (1, 'alfred', 10.30, 2), (3, 'calvin', 978.76, 3); diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_6_aggr_2joins.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_6_aggr_2joins.q new file mode 100644 index 000000000000..823c690696f2 --- /dev/null +++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_6_aggr_2joins.q @@ -0,0 +1,49 @@ +-- Test Incremental rebuild of materialized view with aggregate and count(*) and two joined tables +-- when records is deleted from one source table and another is inserted into the other table with the same join key values. 
+ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; + +create table cmv_basetable_n6 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); + +insert into cmv_basetable_n6 values +(1, 'alfred', 10.30, 2), +(1, 'charlie', 20.30, 2), +(2, 'zoe', 100.30, 2); + +create table cmv_basetable_2_n3 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); + +insert into cmv_basetable_2_n3 values +(1, 'bob', 30.30, 2), +(1, 'bonnie', 40.30, 2), +(2, 'joe', 130.30, 2); + +SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b, count(*) +FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +group by cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b; + +CREATE MATERIALIZED VIEW cmv_mat_view_n6 TBLPROPERTIES ('transactional'='true') AS +SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b, count(*) +FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +group by cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b; + +insert into cmv_basetable_n6 values +(1, 'kevin', 50.30, 2); + +DELETE FROM cmv_basetable_2_n3 WHERE b = 'bonnie'; + +EXPLAIN CBO +ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD; + +EXPLAIN +ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD; + +ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD; + +select * from cmv_mat_view_n6; + +drop materialized view cmv_mat_view_n6; + +SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b, count(*) +FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +group by cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b; diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_6_aggr_3joins.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_6_aggr_3joins.q new file mode 100644 index 000000000000..f1b83eb2cf48 --- /dev/null +++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_6_aggr_3joins.q @@ -0,0 +1,77 @@ +-- Test Incremental rebuild of materialized view with aggregate and count(*) and 3 joined tables +-- when records is deleted from one source table and another is inserted into the other table with the same join key values. 
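-- A short sketch of the extra case this three-table variant covers (hypothetical names, again
-- assuming the ACID settings enabled just below): when only one base table has changed since the
-- last rebuild, the deltas of the unchanged tables are empty, so in principle only the changed
-- table's delta has to be joined back against the other tables to adjust the stored count(*).
create table d1 (k int) stored as orc tblproperties ('transactional'='true');
create table d2 (k int) stored as orc tblproperties ('transactional'='true');
create table d3 (k int) stored as orc tblproperties ('transactional'='true');
insert into d1 values (0), (1);
insert into d2 values (1);
insert into d3 values (1);
create materialized view mv3 tblproperties ('transactional'='true') as
select d1.k, count(*) from d1 join d2 on (d1.k = d2.k) join d3 on (d2.k = d3.k) group by d1.k;
delete from d1 where k = 0;  -- only d1 changes after the view is built
explain cbo alter materialized view mv3 rebuild;
alter materialized view mv3 rebuild;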
+ +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; + +create table cmv_basetable_n6 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); + +insert into cmv_basetable_n6 values +(1, 'alfred', 10.30, 2), +(1, 'charlie', 20.30, 2), +(2, 'zoe', 100.30, 2); + +create table cmv_basetable_2_n3 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); + +insert into cmv_basetable_2_n3 values +(1, 'bob', 30.30, 2), +(1, 'bonnie', 40.30, 2), +(2, 'joe', 130.30, 2); + +create table t3 (a int, b varchar(256), c decimal(10,2)) stored as orc TBLPROPERTIES ('transactional'='true'); + +insert into t3 values +(1, 'foo', 30.30), +(1, 'bar', 30.30), +(2, 'bar', 30.30); + +CREATE MATERIALIZED VIEW mat1 TBLPROPERTIES ('transactional'='true') AS +SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c, count(*) +FROM cmv_basetable_n6 +JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +JOIN t3 ON (t3.a = cmv_basetable_2_n3.a) +WHERE cmv_basetable_n6.c > 10 OR cmv_basetable_2_n3.c > 10 +group by cmv_basetable_n6.a, cmv_basetable_2_n3.c; + +insert into cmv_basetable_n6 values +(1, 'kevin', 50.30, 2); + +insert into t3 values +(1, 'new rec', 60.30); + +DELETE FROM cmv_basetable_2_n3 WHERE b = 'bonnie'; + + +EXPLAIN CBO +ALTER MATERIALIZED VIEW mat1 REBUILD; + +EXPLAIN +ALTER MATERIALIZED VIEW mat1 REBUILD; + +ALTER MATERIALIZED VIEW mat1 REBUILD; + +select * from mat1; + + +-- Delete only from one table, do not change the rest of the tables +delete from cmv_basetable_n6 where b = 'kevin'; + +EXPLAIN CBO +ALTER MATERIALIZED VIEW mat1 REBUILD; + +EXPLAIN +ALTER MATERIALIZED VIEW mat1 REBUILD; + +ALTER MATERIALIZED VIEW mat1 REBUILD; + +select * from mat1; + + +drop materialized view mat1; + +SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c, count(*) +FROM cmv_basetable_n6 +JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +JOIN t3 ON (t3.a = cmv_basetable_2_n3.a) +WHERE cmv_basetable_n6.c > 10 OR cmv_basetable_2_n3.c > 10 +group by cmv_basetable_n6.a, cmv_basetable_2_n3.c; diff --git a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_8.q b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_8.q index 651bcb4319e7..aef9cb9965c8 100644 --- a/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_8.q +++ b/ql/src/test/queries/clientpositive/materialized_view_create_rewrite_8.q @@ -7,6 +7,8 @@ set hive.materializedview.rewriting=true; create table cmv_basetable_n6 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true'); insert into cmv_basetable_n6 values + (1, 'alfred', 10.30, 2), + (1, 'alfred', 10.30, 2), (1, 'alfred', 10.30, 2), (2, 'bob', 3.14, 3), (2, 'bonnie', 172342.2, 3), @@ -16,6 +18,7 @@ insert into cmv_basetable_n6 values create table cmv_basetable_2_n3 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true', 'transactional_properties'='insert_only'); insert into cmv_basetable_2_n3 values + (1, 'alfred', 10.30, 2), (1, 'alfred', 10.30, 2), (3, 'calvin', 978.76, 3); diff --git a/ql/src/test/queries/clientnegative/materialized_view_no_cbo_rewrite_2.q b/ql/src/test/queries/clientpositive/materialized_view_no_cbo_rewrite.q similarity index 100% rename from ql/src/test/queries/clientnegative/materialized_view_no_cbo_rewrite_2.q rename to 
ql/src/test/queries/clientpositive/materialized_view_no_cbo_rewrite.q diff --git a/ql/src/test/queries/clientpositive/materialized_view_rewrite_by_text_10.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_by_text_10.q new file mode 100644 index 000000000000..43deec4a8142 --- /dev/null +++ b/ql/src/test/queries/clientpositive/materialized_view_rewrite_by_text_10.q @@ -0,0 +1,11 @@ +-- Materialzed view definition has non-deterministic function +set hive.support.concurrency=true; +set hive.txn.manager=org.apache.hadoop.hive.ql.lockmgr.DbTxnManager; + +CREATE TABLE EMPS (ENAME STRING, BIRTH_EPOCH_SECS INT) STORED AS ORC TBLPROPERTIES ('transactional'='true'); + +CREATE MATERIALIZED VIEW v_emp AS SELECT * FROM EMPS WHERE BIRTH_EPOCH_SECS <= UNIX_TIMESTAMP(); + +-- View can not be used +explain cbo +SELECT * FROM EMPS WHERE BIRTH_EPOCH_SECS <= UNIX_TIMESTAMP(); diff --git a/ql/src/test/queries/clientnegative/materialized_view_no_supported_op_rewrite_2.q b/ql/src/test/queries/clientpositive/materialized_view_rewrite_by_text_11.q similarity index 100% rename from ql/src/test/queries/clientnegative/materialized_view_no_supported_op_rewrite_2.q rename to ql/src/test/queries/clientpositive/materialized_view_rewrite_by_text_11.q diff --git a/ql/src/test/queries/clientpositive/not_in_scenarios.q b/ql/src/test/queries/clientpositive/not_in_scenarios.q new file mode 100644 index 000000000000..6d614bd2c02c --- /dev/null +++ b/ql/src/test/queries/clientpositive/not_in_scenarios.q @@ -0,0 +1,8 @@ +CREATE TABLE my_tbl (id int); +insert into my_tbl values (100),(200),(300); +select * from my_tbl where id not in ('ABC', 'DEF'); +select * from my_tbl where id not in ('ABC', 'DEF', '123'); +select * from my_tbl where id not in ('ABC', 'DEF', '100'); +select * from my_tbl where id not in (100, 'ABC', 200); +select * from my_tbl where id is not null or id in ("ABC"); +select * from my_tbl where id is not null and id in ("ABC"); \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/partition_timestamp3.q b/ql/src/test/queries/clientpositive/partition_timestamp3.q new file mode 100644 index 000000000000..b408848d6226 --- /dev/null +++ b/ql/src/test/queries/clientpositive/partition_timestamp3.q @@ -0,0 +1,6 @@ +--! qt:timezone:Europe/Paris +DROP TABLE IF EXISTS payments; +CREATE EXTERNAL TABLE payments (card string) PARTITIONED BY(txn_datetime TIMESTAMP) STORED AS ORC; +INSERT into payments VALUES('3333-4444-2222-9999', '2023-03-26 02:30:00'), ('3333-4444-2222-9999', '2023-03-26 03:30:00'); +SELECT * FROM payments WHERE txn_datetime = '2023-03-26 02:30:00'; +SELECT * FROM payments WHERE txn_datetime = '2023-03-26 03:30:00'; diff --git a/ql/src/test/queries/clientpositive/ptf_register_use.q b/ql/src/test/queries/clientpositive/ptf_register_use.q new file mode 100644 index 000000000000..ff899a283dc0 --- /dev/null +++ b/ql/src/test/queries/clientpositive/ptf_register_use.q @@ -0,0 +1,5 @@ +--! qt:dataset:temp_udaf + +explain select ptf_register_use_func() over(); + +select ptf_register_use_func() over(); diff --git a/ql/src/test/queries/clientpositive/reduceSinkDeDuplication_pRS_key_empty.q b/ql/src/test/queries/clientpositive/reduceSinkDeDuplication_pRS_key_empty.q index f92229560ae2..8af3ff493857 100644 --- a/ql/src/test/queries/clientpositive/reduceSinkDeDuplication_pRS_key_empty.q +++ b/ql/src/test/queries/clientpositive/reduceSinkDeDuplication_pRS_key_empty.q @@ -1,4 +1,5 @@ --! qt:dataset:src +--! 
qt:dataset:temp_udf set hive.mapred.mode=nonstrict; set hive.cbo.enable=false; diff --git a/ql/src/test/queries/clientpositive/rename_table.q b/ql/src/test/queries/clientpositive/rename_table.q new file mode 100644 index 000000000000..1e4f2fcdd550 --- /dev/null +++ b/ql/src/test/queries/clientpositive/rename_table.q @@ -0,0 +1,40 @@ +--! qt:dataset:src +--! qt:dataset:part +-- This test verifies that if the table after rename can still fetch the column statistics +set hive.stats.kll.enable=true; +set metastore.stats.fetch.bitvector=true; +set metastore.stats.fetch.kll=true; +set hive.stats.autogather=true; +set hive.stats.column.autogather=true; + +CREATE TABLE rename_partition_table0 (key STRING, value STRING) PARTITIONED BY (part STRING) +STORED AS ORC; + +INSERT OVERWRITE TABLE rename_partition_table0 PARTITION (part = '1') SELECT * FROM src where rand(1) < 0.5; +ALTER TABLE rename_partition_table0 ADD COLUMNS (new_col INT); +INSERT OVERWRITE TABLE rename_partition_table0 PARTITION (part = '2') SELECT src.*, 1 FROM src; + +ALTER TABLE rename_partition_table0 RENAME TO rename_partition_table1; +DESCRIBE FORMATTED rename_partition_table1; +DESCRIBE FORMATTED rename_partition_table1 PARTITION (part='1') key; +DESCRIBE FORMATTED rename_partition_table1 PARTITION (part='1') value; +DESCRIBE FORMATTED rename_partition_table1 PARTITION (part='2') key; +DESCRIBE FORMATTED rename_partition_table1 PARTITION (part='2') value; +DESCRIBE FORMATTED rename_partition_table1 PARTITION (part='2') new_col; + +CREATE EXTERNAL TABLE rename_partition_table_ext0 (key STRING, value STRING) PARTITIONED BY (part STRING) +STORED AS ORC; + +INSERT OVERWRITE TABLE rename_partition_table_ext0 PARTITION (part = '1') SELECT * FROM src where rand(1) < 0.5; +ALTER TABLE rename_partition_table_ext0 CHANGE COLUMN value val STRING CASCADE; +INSERT OVERWRITE TABLE rename_partition_table_ext0 PARTITION (part = '2') SELECT * FROM src; + +ALTER TABLE rename_partition_table_ext0 RENAME TO rename_partition_table_ext1; +DESCRIBE FORMATTED rename_partition_table_ext1; +DESCRIBE FORMATTED rename_partition_table_ext1 PARTITION (part='1') key; +DESCRIBE FORMATTED rename_partition_table_ext1 PARTITION (part='1') val; +DESCRIBE FORMATTED rename_partition_table_ext1 PARTITION (part='2') key; +DESCRIBE FORMATTED rename_partition_table_ext1 PARTITION (part='2') val; + +DROP TABLE rename_partition_table1; +DROP TABLE rename_partition_table_ext1; diff --git a/ql/src/test/queries/clientpositive/sharedwork_virtualcol_schema_merge.q b/ql/src/test/queries/clientpositive/sharedwork_virtualcol_schema_merge.q new file mode 100644 index 000000000000..c471201222a9 --- /dev/null +++ b/ql/src/test/queries/clientpositive/sharedwork_virtualcol_schema_merge.q @@ -0,0 +1,35 @@ +set hive.optimize.shared.work.merge.ts.schema=false; + +create table t1(a int); + +-- 3 map vertices scans table t1 +explain +WITH t AS ( + select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a from ( + select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a, row_number() OVER (partition by INPUT__FILE__NAME) rn from t1 + where a = 1 + ) q + where rn=1 +) +select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a from t1 where NOT (a = 1) AND INPUT__FILE__NAME IN (select INPUT__FILE__NAME from t) +union all +select * from t; + + + +set hive.optimize.shared.work.merge.ts.schema=true; + +-- 2 of 3 map vertices scanning table t1 are merged: +-- One projects BLOCK__OFFSET__INSIDE__FILE and INPUT__FILE__NAME and the second one projects INPUT__FILE__NAME only. 
+-- These are merged to one scan which projects both. +explain +WITH t AS ( + select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a from ( + select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a, row_number() OVER (partition by INPUT__FILE__NAME) rn from t1 + where a = 1 + ) q + where rn=1 +) +select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a from t1 where NOT (a = 1) AND INPUT__FILE__NAME IN (select INPUT__FILE__NAME from t) +union all +select * from t; diff --git a/ql/src/test/queries/clientpositive/subquery_complex_correlation_predicates.q b/ql/src/test/queries/clientpositive/subquery_complex_correlation_predicates.q index ccfed8dcb23a..1d6dbbee8c04 100644 --- a/ql/src/test/queries/clientpositive/subquery_complex_correlation_predicates.q +++ b/ql/src/test/queries/clientpositive/subquery_complex_correlation_predicates.q @@ -78,3 +78,18 @@ where not exists (select a_authorkey from author a where coalesce(b.b_authorkey, 400) = coalesce(a.a_authorkey, 400)); + +-- HIVE-27801: Exists subquery rewrite results in a wrong plan +drop table if exists store_sales; +create table store_sales (promo_sk int, sales_price int, list_price int); + +insert into store_sales values (1, 20, 15), (1, 15, 20), (1, 10, 15); + +explain cbo +select * from store_sales A where exists( +select 1 from store_sales B + where A.promo_sk = B.promo_sk and A.sales_price > B.list_price and A.sales_price < B.sales_price); + +select * from store_sales A where exists( +select 1 from store_sales B + where A.promo_sk = B.promo_sk and A.sales_price > B.list_price and A.sales_price < B.sales_price); diff --git a/ql/src/test/queries/clientpositive/subquery_join.q b/ql/src/test/queries/clientpositive/subquery_join.q new file mode 100644 index 000000000000..f9dc9795e060 --- /dev/null +++ b/ql/src/test/queries/clientpositive/subquery_join.q @@ -0,0 +1,17 @@ +create table t1 (id int); +create table t2 (id int); + +explain cbo select id, + (select count(*) from t1 join t2 on t1.id=t2.id) + from t2 +order by id; + +explain cbo select id, + (select count(*) from t1 join t2 using (id)) + from t2 +order by id; + +explain cbo select id, + (select count(*) from t1 join t2 where t1.id=t2.id) + from t2 +order by id; diff --git a/ql/src/test/queries/clientpositive/tez_dynpart_hashjoin_4.q b/ql/src/test/queries/clientpositive/tez_dynpart_hashjoin_4.q new file mode 100644 index 000000000000..d15307a42de1 --- /dev/null +++ b/ql/src/test/queries/clientpositive/tez_dynpart_hashjoin_4.q @@ -0,0 +1,24 @@ +CREATE TABLE table_a (start_date date, product_id int); + +ALTER TABLE table_a UPDATE STATISTICS SET('numRows'='200000000','rawDataSize'='0' ); +ALTER TABLE table_a UPDATE STATISTICS FOR COLUMN product_id SET('lowValue'='1000000','highValue'='100000000','numNulls'='0','numDVs'='300000' ); +ALTER TABLE table_a UPDATE STATISTICS FOR COLUMN start_date SET('lowValue'='10000','highValue'='20000','numNulls'='0','numDVs'='2500' ); + +CREATE TABLE table_b (start_date date, product_id int, product_sk string); + +ALTER TABLE table_b UPDATE STATISTICS SET('numRows'='100000000','rawDataSize'='0' ); +ALTER TABLE table_b UPDATE STATISTICS FOR COLUMN product_id SET('lowValue'='1000000','highValue'='100000000','numNulls'='0','numDVs'='300000' ); +ALTER TABLE table_b UPDATE STATISTICS FOR COLUMN start_date SET('lowValue'='10000','highValue'='20000','numNulls'='0','numDVs'='500' ); +ALTER TABLE table_b UPDATE STATISTICS FOR COLUMN product_sk SET ('numDVs'='300000','numNulls'='0','avgColLen'='10','maxColLen'='10'); + +set 
hive.optimize.dynamic.partition.hashjoin=true; +set hive.auto.convert.join=true; +set hive.auto.convert.join.noconditionaltask.size=180000000; + +EXPLAIN +SELECT TC.CONST_DATE, TB.PRODUCT_SK +FROM TABLE_A TA +INNER JOIN (SELECT TO_DATE(FROM_UNIXTIME(1701088643)) AS CONST_DATE) TC + ON TA.START_DATE = TC.CONST_DATE +INNER JOIN TABLE_B TB + ON TB.START_DATE = TC.CONST_DATE AND TA.PRODUCT_ID = TB.PRODUCT_ID; diff --git a/ql/src/test/queries/clientpositive/udf_array_position.q b/ql/src/test/queries/clientpositive/udf_array_position.q new file mode 100644 index 000000000000..aa82679cb679 --- /dev/null +++ b/ql/src/test/queries/clientpositive/udf_array_position.q @@ -0,0 +1,50 @@ +--! qt:dataset:src + +-- SORT_QUERY_RESULTS + +set hive.fetch.task.conversion=more; + +DESCRIBE FUNCTION array_position; +DESCRIBE FUNCTION EXTENDED array_position; + +-- evaluates function for array of primitives +SELECT array_position(array(1, 2, 3, null,3,4), 3); + +SELECT array_position(array(1.12, 2.23, 3.34, null,1.11,1.12,2.9),1.12); + +SELECT array(1,2,3),array_position(array(1, 2, 3),3); + +SELECT array(1,2,3),array_position(array(1, 2, 3),5); + +SELECT array_position(array(1, 2, 3), CAST(null AS int)); + +SELECT array_position(array(1.1234567890, 2.234567890, 3.34567890, null, 3.3456789, 2.234567,1.1234567890),1.1234567890); + +SELECT array_position(array(11234567890, 2234567890, 334567890, null, 11234567890, 2234567890, 334567890, null),11234567890); + +SELECT array_position(array(array("a","b","c","d"),array("a","b","c","d"),array("a","b","c","d","e"),null,array("e","a","b","c","d")),array("a","b","c","d")); + +SELECT array_position(array("aa","bb","cc"),"cc"); + +create table test as select array('a', 'b', 'c', 'b') as a union all select array('a', 'c', 'd') as a; + +select * from test; + +select a, array_position(a, 'b') from test; + +# handle null array cases + +dfs ${system:test.dfs.mkdir} ${system:test.tmp.dir}/test_null_array; + +dfs -copyFromLocal ../../data/files/test_null_array.csv ${system:test.tmp.dir}/test_null_array/; + +create external table test_null_array (id string, value Array) ROW FORMAT DELIMITED + FIELDS TERMINATED BY ':' collection items terminated by ',' location '${system:test.tmp.dir}/test_null_array'; + +select id,value from test_null_array; + +select id,array_position(value,id) from test_null_array; + +select value, array_position(value,id) from test_null_array; + +dfs -rm -r ${system:test.tmp.dir}/test_null_array; \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/udtf_stack.q b/ql/src/test/queries/clientpositive/udtf_stack.q index 8aa7a8cfe80e..def0c38f5d56 100644 --- a/ql/src/test/queries/clientpositive/udtf_stack.q +++ b/ql/src/test/queries/clientpositive/udtf_stack.q @@ -11,3 +11,6 @@ SELECT x, y FROM src LATERAL VIEW STACK(2, 'x', array(1), 'z', array(4)) a AS x, EXPLAIN SELECT stack(1, "en", "dbpedia", NULL ); SELECT stack(1, "en", "dbpedia", NULL ); + +EXPLAIN SELECT STACK(2, 'a', 'b', 'c', 'd', 'e'); +SELECT STACK(2, 'a', 'b', 'c', 'd', 'e'); diff --git a/ql/src/test/queries/clientpositive/varchar_udf1.q b/ql/src/test/queries/clientpositive/varchar_udf1.q index 7aee1ce2251e..80e184a8c199 100644 --- a/ql/src/test/queries/clientpositive/varchar_udf1.q +++ b/ql/src/test/queries/clientpositive/varchar_udf1.q @@ -1,4 +1,5 @@ --! qt:dataset:src +--! 
qt:dataset:temp_udf drop table varchar_udf_1; create table varchar_udf_1 (c1 string, c2 string, c3 varchar(10), c4 varchar(20)); diff --git a/ql/src/test/queries/clientpositive/vector_udf1.q b/ql/src/test/queries/clientpositive/vector_udf1.q index a258025dc10a..5a642dafb281 100644 --- a/ql/src/test/queries/clientpositive/vector_udf1.q +++ b/ql/src/test/queries/clientpositive/vector_udf1.q @@ -1,4 +1,5 @@ --! qt:dataset:src +--! qt:dataset:temp_udf SET hive.vectorized.execution.enabled=true; set hive.fetch.task.conversion=none; diff --git a/ql/src/test/results/clientnegative/avro_duplicate_field_name.q.out b/ql/src/test/results/clientnegative/avro_duplicate_field_name.q.out new file mode 100644 index 000000000000..3d7527c33a05 --- /dev/null +++ b/ql/src/test/results/clientnegative/avro_duplicate_field_name.q.out @@ -0,0 +1,27 @@ +PREHOOK: query: CREATE TABLE avroExternalDupField +STORED AS AVRO +TBLPROPERTIES ('avro.schema.literal'='{ + "namespace": "org.apache.hive", + "name": "my_schema", + "type": "record", + "fields": [ + { + "name": "f1", + "type": { + "name": "a", + "type": "record", + "fields": [] + } + }, { + "name": "f1", + "type": { + "name": "b", + "type": "record", + "fields": [] + } + } + ] }') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@avroExternalDupField +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Invalid schema reported) diff --git a/ql/src/test/results/clientnegative/avro_invalid_field_name.q.out b/ql/src/test/results/clientnegative/avro_invalid_field_name.q.out new file mode 100644 index 000000000000..75ae3f4e8d30 --- /dev/null +++ b/ql/src/test/results/clientnegative/avro_invalid_field_name.q.out @@ -0,0 +1,20 @@ +PREHOOK: query: CREATE TABLE avroExternalInvalidField +STORED AS AVRO +TBLPROPERTIES ('avro.schema.literal'='{ + "namespace": "org.apache.hive", + "name": "my_record", + "type": "record", + "fields": [ + { + "name": "f1.x", + "type": { + "name": "my_enum", + "type": "enum", + "symbols": ["a"] + } + } + ] }') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@avroExternalInvalidField +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
java.lang.RuntimeException: MetaException(message:org.apache.hadoop.hive.serde2.SerDeException Invalid schema reported) diff --git a/ql/src/test/results/clientnegative/broken_pipe.q.out b/ql/src/test/results/clientnegative/broken_pipe.q.out index 3b830247418f..6eaf02a92139 100644 --- a/ql/src/test/results/clientnegative/broken_pipe.q.out +++ b/ql/src/test/results/clientnegative/broken_pipe.q.out @@ -3,13 +3,13 @@ PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: hdfs://### HDFS PATH ### Status: Failed -Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators +Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators #### A masked pattern was here #### Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators #### A masked pattern was here #### Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 20003]: An error occurred when trying to close the Operator running your custom script. #### A masked pattern was here #### -], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators +], TaskAttempt 1 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators #### A masked pattern was here #### Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators #### A masked pattern was here #### @@ -18,13 +18,13 @@ Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 20003]: An e ]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1 killedTasks:0, Vertex vertex_#ID# [Map 1] killed/failed due to:OWN_TASK_FAILURE] [Masked Vertex killed due to OTHER_VERTEX_FAILURE] DAG did not succeed due to VERTEX_FAILURE. failedVertices:1 killedVertices:1 -FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.tez.TezTask. Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators +FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.tez.TezTask. Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators #### A masked pattern was here #### Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators #### A masked pattern was here #### Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 20003]: An error occurred when trying to close the Operator running your custom script. 
#### A masked pattern was here #### -], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators +], TaskAttempt 1 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators #### A masked pattern was here #### Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators #### A masked pattern was here #### diff --git a/ql/src/test/results/clientnegative/cachingprintstream.q.out b/ql/src/test/results/clientnegative/cachingprintstream.q.out index db6abc66ecb0..062139fc710a 100644 --- a/ql/src/test/results/clientnegative/cachingprintstream.q.out +++ b/ql/src/test/results/clientnegative/cachingprintstream.q.out @@ -9,7 +9,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### Status: Failed -Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row +Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### @@ -21,7 +21,7 @@ Caused by: java.io.IOException: Cannot run program "FAKE_SCRIPT_SHOULD_NOT_EXIST #### A masked pattern was here #### Caused by: java.io.IOException: error=2, No such file or directory #### A masked pattern was here #### -], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row +], TaskAttempt 1 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### @@ -41,7 +41,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### Status: Failed -Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row +Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: 
java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### @@ -53,7 +53,7 @@ Caused by: java.io.IOException: Cannot run program "FAKE_SCRIPT_SHOULD_NOT_EXIST #### A masked pattern was here #### Caused by: java.io.IOException: error=2, No such file or directory #### A masked pattern was here #### -], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row +], TaskAttempt 1 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### @@ -68,7 +68,7 @@ Caused by: java.io.IOException: error=2, No such file or directory ]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1 killedTasks:0, Vertex vertex_#ID# [Map 1] killed/failed due to:OWN_TASK_FAILURE] DAG did not succeed due to VERTEX_FAILURE. failedVertices:1 killedVertices:0 End cached logs. -FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.tez.TezTask. Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row +FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.tez.TezTask. 
Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### @@ -80,7 +80,7 @@ Caused by: java.io.IOException: Cannot run program "FAKE_SCRIPT_SHOULD_NOT_EXIST #### A masked pattern was here #### Caused by: java.io.IOException: error=2, No such file or directory #### A masked pattern was here #### -], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row +], TaskAttempt 1 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### diff --git a/ql/src/test/results/clientnegative/compute_stats_long.q.out b/ql/src/test/results/clientnegative/compute_stats_long.q.out deleted file mode 100644 index 990a94179076..000000000000 --- a/ql/src/test/results/clientnegative/compute_stats_long.q.out +++ /dev/null @@ -1,57 +0,0 @@ -PREHOOK: query: create table tab_int(a int) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tab_int -POSTHOOK: query: create table tab_int(a int) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tab_int -PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/int.txt" INTO TABLE tab_int -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@tab_int -POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/int.txt" INTO TABLE tab_int -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@tab_int -PREHOOK: query: select compute_stats(a, 'fm', 10000) from tab_int -PREHOOK: type: QUERY -PREHOOK: Input: default@tab_int -#### A masked pattern was here #### -Status: Failed -Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row -#### A masked pattern was here #### -Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row -#### A masked pattern was here #### -Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row -#### A masked pattern was here #### -Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: The maximum allowed value for number of bit vectors is 1024, but was passed 10000 bit vectors -#### A masked pattern was here #### -], TaskAttempt 1 failed, info=[Error: Error while running task ( 
failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row -#### A masked pattern was here #### -Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row -#### A masked pattern was here #### -Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row -#### A masked pattern was here #### -Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: The maximum allowed value for number of bit vectors is 1024, but was passed 10000 bit vectors -#### A masked pattern was here #### -]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1 killedTasks:0, Vertex vertex_#ID# [Map 1] killed/failed due to:OWN_TASK_FAILURE] -[Masked Vertex killed due to OTHER_VERTEX_FAILURE] -DAG did not succeed due to VERTEX_FAILURE. failedVertices:1 killedVertices:1 -FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.tez.TezTask. Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row -#### A masked pattern was here #### -Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row -#### A masked pattern was here #### -Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row -#### A masked pattern was here #### -Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: The maximum allowed value for number of bit vectors is 1024, but was passed 10000 bit vectors -#### A masked pattern was here #### -], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row -#### A masked pattern was here #### -Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row -#### A masked pattern was here #### -Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row -#### A masked pattern was here #### -Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: The maximum allowed value for number of bit vectors is 1024, but was passed 10000 bit vectors -#### A masked pattern was here #### -]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1 killedTasks:0, Vertex vertex_#ID# [Map 1] killed/failed due to:OWN_TASK_FAILURE][Masked Vertex killed due to OTHER_VERTEX_FAILURE]DAG did not succeed due to VERTEX_FAILURE. 
failedVertices:1 killedVertices:1 diff --git a/ql/src/test/results/clientnegative/groupby_cube_multi_gby.q.out b/ql/src/test/results/clientnegative/groupby_cube_multi_gby.q.out index 88b87bb25846..e2491900dd38 100644 --- a/ql/src/test/results/clientnegative/groupby_cube_multi_gby.q.out +++ b/ql/src/test/results/clientnegative/groupby_cube_multi_gby.q.out @@ -14,4 +14,4 @@ POSTHOOK: query: create table t2 like src POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t2 -FAILED: SemanticException [Error 10315]: Grouping sets aggregations (with rollups or cubes) are not allowed when HIVEMULTIGROUPBYSINGLEREDUCER is turned on. Set hive.multigroupby.singlereducer=false if you want to use grouping sets +FAILED: SemanticException [Error 10315]: Grouping sets aggregations (with rollups or cubes) are not allowed when HIVE_MULTI_GROUPBY_SINGLE_REDUCER is turned on. Set hive.multigroupby.singlereducer=false if you want to use grouping sets diff --git a/ql/src/test/results/clientnegative/msck_repair_1.q.out b/ql/src/test/results/clientnegative/msck_repair_1.q.out index c89b16ffc97c..382a91389cf5 100644 --- a/ql/src/test/results/clientnegative/msck_repair_1.q.out +++ b/ql/src/test/results/clientnegative/msck_repair_1.q.out @@ -21,4 +21,4 @@ POSTHOOK: Output: default@repairtable PREHOOK: query: MSCK TABLE default.repairtable PREHOOK: type: MSCK PREHOOK: Output: default@repairtable -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask +#### A masked pattern was here #### diff --git a/ql/src/test/results/clientnegative/msck_repair_2.q.out b/ql/src/test/results/clientnegative/msck_repair_2.q.out index c89b16ffc97c..382a91389cf5 100644 --- a/ql/src/test/results/clientnegative/msck_repair_2.q.out +++ b/ql/src/test/results/clientnegative/msck_repair_2.q.out @@ -21,4 +21,4 @@ POSTHOOK: Output: default@repairtable PREHOOK: query: MSCK TABLE default.repairtable PREHOOK: type: MSCK PREHOOK: Output: default@repairtable -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask +#### A masked pattern was here #### diff --git a/ql/src/test/results/clientnegative/msck_repair_3.q.out b/ql/src/test/results/clientnegative/msck_repair_3.q.out index c89b16ffc97c..382a91389cf5 100644 --- a/ql/src/test/results/clientnegative/msck_repair_3.q.out +++ b/ql/src/test/results/clientnegative/msck_repair_3.q.out @@ -21,4 +21,4 @@ POSTHOOK: Output: default@repairtable PREHOOK: query: MSCK TABLE default.repairtable PREHOOK: type: MSCK PREHOOK: Output: default@repairtable -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask +#### A masked pattern was here #### diff --git a/ql/src/test/results/clientnegative/msck_repair_4.q.out b/ql/src/test/results/clientnegative/msck_repair_4.q.out index d7f92f5f86b2..47ff66ca1a75 100644 --- a/ql/src/test/results/clientnegative/msck_repair_4.q.out +++ b/ql/src/test/results/clientnegative/msck_repair_4.q.out @@ -21,4 +21,4 @@ POSTHOOK: Output: default@repairtable PREHOOK: query: MSCK REPAIR TABLE default.repairtable PREHOOK: type: MSCK PREHOOK: Output: default@repairtable -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask +#### A masked pattern was here #### diff --git a/ql/src/test/results/clientnegative/msck_repair_5.q.out b/ql/src/test/results/clientnegative/msck_repair_5.q.out index 3008ccb90454..1e452c0a53c9 100644 --- a/ql/src/test/results/clientnegative/msck_repair_5.q.out +++ b/ql/src/test/results/clientnegative/msck_repair_5.q.out @@ -29,4 
+29,4 @@ Partitions not in metastore: repairtable:p1=A PREHOOK: query: MSCK REPAIR TABLE default.repairtable PREHOOK: type: MSCK PREHOOK: Output: default@repairtable -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. Found two paths for same partition 'repairtable:p1=a' for table repairtable diff --git a/ql/src/test/results/clientnegative/msck_repair_6.q.out b/ql/src/test/results/clientnegative/msck_repair_6.q.out index 2ec1e6517cad..a6c22cc4779a 100644 --- a/ql/src/test/results/clientnegative/msck_repair_6.q.out +++ b/ql/src/test/results/clientnegative/msck_repair_6.q.out @@ -29,4 +29,4 @@ Partitions not in metastore: repairtable:p1=a PREHOOK: query: MSCK REPAIR TABLE default.repairtable PREHOOK: type: MSCK PREHOOK: Output: default@repairtable -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. The partition 'repairtable:p1=a' already exists for tablerepairtable diff --git a/ql/src/test/results/clientnegative/msck_repair_7.q.out b/ql/src/test/results/clientnegative/msck_repair_7.q.out new file mode 100644 index 000000000000..76c55bf9938b --- /dev/null +++ b/ql/src/test/results/clientnegative/msck_repair_7.q.out @@ -0,0 +1,18 @@ +PREHOOK: query: DROP TABLE IF EXISTS repairtable +PREHOOK: type: DROPTABLE +PREHOOK: Output: database:default +POSTHOOK: query: DROP TABLE IF EXISTS repairtable +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: database:default +PREHOOK: query: CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@repairtable +POSTHOOK: query: CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@repairtable +PREHOOK: query: MSCK REPAIR TABLE default.repairtable +PREHOOK: type: MSCK +PREHOOK: Output: default@repairtable +#### A masked pattern was here #### diff --git a/ql/src/test/results/clientnegative/msck_repair_8.q.out b/ql/src/test/results/clientnegative/msck_repair_8.q.out new file mode 100644 index 000000000000..11b930347992 --- /dev/null +++ b/ql/src/test/results/clientnegative/msck_repair_8.q.out @@ -0,0 +1,15 @@ +PREHOOK: query: DROP TABLE IF EXISTS repairtable +PREHOOK: type: DROPTABLE +PREHOOK: Output: database:default +POSTHOOK: query: DROP TABLE IF EXISTS repairtable +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: database:default +PREHOOK: query: CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@repairtable +POSTHOOK: query: CREATE TABLE repairtable(col STRING) PARTITIONED BY (p1 STRING) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@repairtable +FAILED: SemanticException [Error 10431]: Table Meta Ref extension is not supported for table repairtable. 
diff --git a/ql/src/test/results/clientnegative/script_broken_pipe3.q.out b/ql/src/test/results/clientnegative/script_broken_pipe3.q.out index 2ef43286244e..cfe51bcfa223 100644 --- a/ql/src/test/results/clientnegative/script_broken_pipe3.q.out +++ b/ql/src/test/results/clientnegative/script_broken_pipe3.q.out @@ -3,13 +3,13 @@ PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### Status: Failed -Vertex failed, vertexName=Reducer 2, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators: [Error 20003]: An error occurred when trying to close the Operator running your custom script. +Vertex failed, vertexName=Reducer 2, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators: [Error 20003]: An error occurred when trying to close the Operator running your custom script. #### A masked pattern was here #### Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators: [Error 20003]: An error occurred when trying to close the Operator running your custom script. #### A masked pattern was here #### Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 20003]: An error occurred when trying to close the Operator running your custom script. #### A masked pattern was here #### -], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators: [Error 20003]: An error occurred when trying to close the Operator running your custom script. +], TaskAttempt 1 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators: [Error 20003]: An error occurred when trying to close the Operator running your custom script. #### A masked pattern was here #### Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators: [Error 20003]: An error occurred when trying to close the Operator running your custom script. #### A masked pattern was here #### @@ -17,13 +17,13 @@ Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 20003]: An e #### A masked pattern was here #### ]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1 killedTasks:0, Vertex vertex_#ID# [Reducer 2] killed/failed due to:OWN_TASK_FAILURE] DAG did not succeed due to VERTEX_FAILURE. failedVertices:1 killedVertices:0 -FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.tez.TezTask. Vertex failed, vertexName=Reducer 2, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators: [Error 20003]: An error occurred when trying to close the Operator running your custom script. +FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.tez.TezTask. 
Vertex failed, vertexName=Reducer 2, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators: [Error 20003]: An error occurred when trying to close the Operator running your custom script. #### A masked pattern was here #### Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators: [Error 20003]: An error occurred when trying to close the Operator running your custom script. #### A masked pattern was here #### Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 20003]: An error occurred when trying to close the Operator running your custom script. #### A masked pattern was here #### -], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators: [Error 20003]: An error occurred when trying to close the Operator running your custom script. +], TaskAttempt 1 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators: [Error 20003]: An error occurred when trying to close the Operator running your custom script. #### A masked pattern was here #### Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators: [Error 20003]: An error occurred when trying to close the Operator running your custom script. #### A masked pattern was here #### diff --git a/ql/src/test/results/clientnegative/script_error.q.out b/ql/src/test/results/clientnegative/script_error.q.out index 00b1ac2c7814..9c184f60a69b 100644 --- a/ql/src/test/results/clientnegative/script_error.q.out +++ b/ql/src/test/results/clientnegative/script_error.q.out @@ -55,13 +55,13 @@ PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### Status: Failed -Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators +Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators #### A masked pattern was here #### Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators #### A masked pattern was here #### Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 20003]: An error occurred when trying to close the Operator running your custom script. 
#### A masked pattern was here #### -], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators +], TaskAttempt 1 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators #### A masked pattern was here #### Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators #### A masked pattern was here #### @@ -69,13 +69,13 @@ Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 20003]: An e #### A masked pattern was here #### ]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1 killedTasks:0, Vertex vertex_#ID# [Map 1] killed/failed due to:OWN_TASK_FAILURE] DAG did not succeed due to VERTEX_FAILURE. failedVertices:1 killedVertices:0 -FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.tez.TezTask. Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators +FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.tez.TezTask. Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators #### A masked pattern was here #### Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators #### A masked pattern was here #### Caused by: org.apache.hadoop.hive.ql.metadata.HiveException: [Error 20003]: An error occurred when trying to close the Operator running your custom script. 
#### A masked pattern was here #### -], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators +], TaskAttempt 1 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: Hive Runtime Error while closing operators #### A masked pattern was here #### Caused by: java.lang.RuntimeException: Hive Runtime Error while closing operators #### A masked pattern was here #### diff --git a/ql/src/test/results/clientnegative/stack_trace.q.out b/ql/src/test/results/clientnegative/stack_trace.q.out index 73fef35981f0..6d4331cdefe3 100644 --- a/ql/src/test/results/clientnegative/stack_trace.q.out +++ b/ql/src/test/results/clientnegative/stack_trace.q.out @@ -3,7 +3,7 @@ PREHOOK: type: QUERY PREHOOK: Input: default@src PREHOOK: Output: hdfs://### HDFS PATH ### Status: Failed -Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row +Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### @@ -15,7 +15,7 @@ Caused by: java.io.IOException: Cannot run program "script_does_not_exist": erro #### A masked pattern was here #### Caused by: java.io.IOException: error=2, No such file or directory #### A masked pattern was here #### -], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row +], TaskAttempt 1 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### @@ -29,7 +29,7 @@ Caused by: java.io.IOException: error=2, No such file or directory #### A masked pattern was here #### ]], Vertex did not succeed due to OWN_TASK_FAILURE, failedTasks:1 killedTasks:0, Vertex vertex_#ID# [Map 1] killed/failed due to:OWN_TASK_FAILURE] DAG did not succeed due to VERTEX_FAILURE. failedVertices:1 killedVertices:0 -FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.tez.TezTask. 
Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row +FAILED: Execution Error, return code 2 from org.apache.hadoop.hive.ql.exec.tez.TezTask. Vertex failed, vertexName=Map 1, vertexId=vertex_#ID#, diagnostics=[Task failed, taskId=task_#ID#, diagnostics=[TaskAttempt 0 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### @@ -41,7 +41,7 @@ Caused by: java.io.IOException: Cannot run program "script_does_not_exist": erro #### A masked pattern was here #### Caused by: java.io.IOException: error=2, No such file or directory #### A masked pattern was here #### -], TaskAttempt 1 failed, info=[Error: Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row +], TaskAttempt 1 failed, info=[Error: Node: #NODE# : Error while running task ( failure ) : attempt_#ID#:java.lang.RuntimeException: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### Caused by: java.lang.RuntimeException: org.apache.hadoop.hive.ql.metadata.HiveException: Hive Runtime Error while processing row #### A masked pattern was here #### diff --git a/ql/src/test/results/clientnegative/table_nonprintable_negative.q.out b/ql/src/test/results/clientnegative/table_nonprintable_negative.q.out index 23756c167ce5..fa0610a807f0 100644 --- a/ql/src/test/results/clientnegative/table_nonprintable_negative.q.out +++ b/ql/src/test/results/clientnegative/table_nonprintable_negative.q.out @@ -17,4 +17,4 @@ POSTHOOK: Output: default@table_external PREHOOK: query: msck repair table table_external PREHOOK: type: MSCK PREHOOK: Output: default@table_external -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.ddl.DDLTask +FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. 
Repair: Cannot add partition table_external:day=Foo due to invalid characters in the name diff --git a/ql/src/test/results/clientnegative/udf_enforce_constraint_wrong_type1.q.out b/ql/src/test/results/clientnegative/udf_enforce_constraint_wrong_type1.q.out new file mode 100644 index 000000000000..67279fc6b42c --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_enforce_constraint_wrong_type1.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 1:26 Argument type mismatch '1': enforce_constraint only takes BOOLEAN, got int diff --git a/ql/src/test/results/clientnegative/udf_exception_in_vertex_udf_wrong_type1.q.out b/ql/src/test/results/clientnegative/udf_exception_in_vertex_udf_wrong_type1.q.out new file mode 100644 index 000000000000..9f98fa8f9fe6 --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_exception_in_vertex_udf_wrong_type1.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 2:38 Argument type mismatch '1': This argument takes only constant STRING, got int diff --git a/ql/src/test/results/clientnegative/udf_exception_in_vertex_udf_wrong_type2.q.out b/ql/src/test/results/clientnegative/udf_exception_in_vertex_udf_wrong_type2.q.out new file mode 100644 index 000000000000..9f98fa8f9fe6 --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_exception_in_vertex_udf_wrong_type2.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 2:38 Argument type mismatch '1': This argument takes only constant STRING, got int diff --git a/ql/src/test/results/clientnegative/udf_exception_in_vertex_udf_wrong_type3.q.out b/ql/src/test/results/clientnegative/udf_exception_in_vertex_udf_wrong_type3.q.out new file mode 100644 index 000000000000..7b92a101cfea --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_exception_in_vertex_udf_wrong_type3.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 2:47 Argument type mismatch '99.9': This argument takes only constant STRING or INT, got decimal(3,1) diff --git a/ql/src/test/results/clientnegative/udf_exception_in_vertex_udf_wrong_type4.q.out b/ql/src/test/results/clientnegative/udf_exception_in_vertex_udf_wrong_type4.q.out new file mode 100644 index 000000000000..0c2f9310812c --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_exception_in_vertex_udf_wrong_type4.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 2:50 Argument type mismatch '99.9': This argument takes only constant STRING or INT, got decimal(3,1) diff --git a/ql/src/test/results/clientnegative/udf_greatest_error_3.q.out b/ql/src/test/results/clientnegative/udf_greatest_error_3.q.out new file mode 100644 index 000000000000..c6bb3413fc85 --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_greatest_error_3.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 2:23 Argument type mismatch ''b'': greatest only takes primitive types as 2nd argument, got LIST diff --git a/ql/src/test/results/clientnegative/udf_grouping_wrong_type1.q.out b/ql/src/test/results/clientnegative/udf_grouping_wrong_type1.q.out new file mode 100644 index 000000000000..de4f379c3499 --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_grouping_wrong_type1.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 1:19 Argument type mismatch '2': grouping only takes primitive types as 2nd argument, got LIST diff --git a/ql/src/test/results/clientnegative/udf_isfalse_wrong_type1.q.out b/ql/src/test/results/clientnegative/udf_isfalse_wrong_type1.q.out new file mode 100644 index 
000000000000..d03b7b33b73b --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_isfalse_wrong_type1.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 1:15 Argument type mismatch '1': opfalse only takes primitive types as 1st argument, got LIST diff --git a/ql/src/test/results/clientnegative/udf_isnotfalse_wrong_type1.q.out b/ql/src/test/results/clientnegative/udf_isnotfalse_wrong_type1.q.out new file mode 100644 index 000000000000..71712af520c8 --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_isnotfalse_wrong_type1.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 1:18 Argument type mismatch '1': opnotfalse only takes primitive types as 1st argument, got LIST diff --git a/ql/src/test/results/clientnegative/udf_isnottrue_wrong_type1.q.out b/ql/src/test/results/clientnegative/udf_isnottrue_wrong_type1.q.out new file mode 100644 index 000000000000..61c08e9f91b4 --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_isnottrue_wrong_type1.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 1:17 Argument type mismatch '1': opnottrue only takes primitive types as 1st argument, got LIST diff --git a/ql/src/test/results/clientnegative/udf_istrue_wrong_type1.q.out b/ql/src/test/results/clientnegative/udf_istrue_wrong_type1.q.out new file mode 100644 index 000000000000..6a235dad76e4 --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_istrue_wrong_type1.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 1:14 Argument type mismatch '1': optrue only takes primitive types as 1st argument, got LIST diff --git a/ql/src/test/results/clientnegative/udf_split_wrong_type1.q.out b/ql/src/test/results/clientnegative/udf_split_wrong_type1.q.out new file mode 100644 index 000000000000..b10d0dc9e7b9 --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_split_wrong_type1.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 1:13 Argument type mismatch ''1,2,3'': split only takes primitive types as 1st argument, got LIST diff --git a/ql/src/test/results/clientnegative/udf_split_wrong_type2.q.out b/ql/src/test/results/clientnegative/udf_split_wrong_type2.q.out new file mode 100644 index 000000000000..10d58d85ec60 --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_split_wrong_type2.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 1:22 Argument type mismatch '':'': split only takes primitive types as 2nd argument, got LIST diff --git a/ql/src/test/results/clientnegative/udf_sq_count_check_wrong_type1.q.out b/ql/src/test/results/clientnegative/udf_sq_count_check_wrong_type1.q.out new file mode 100644 index 000000000000..8e82da5661da --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_sq_count_check_wrong_type1.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 1:22 Argument type mismatch '1': sq_count_check only takes primitive types as 1st argument, got LIST diff --git a/ql/src/test/results/clientnegative/udf_to_epoch_milli_wrong_type1.q.out b/ql/src/test/results/clientnegative/udf_to_epoch_milli_wrong_type1.q.out new file mode 100644 index 000000000000..ca8e47d82c93 --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_to_epoch_milli_wrong_type1.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 2:22 Argument type mismatch '1': epochmilli only takes TIMESTAMP, got int diff --git a/ql/src/test/results/clientnegative/udf_trunc_wrong_type1.q.out b/ql/src/test/results/clientnegative/udf_trunc_wrong_type1.q.out new file mode 100644 index 
000000000000..571b97e3e902 --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_trunc_wrong_type1.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 1:13 Argument type mismatch ''2023-12-04'': trunc only takes primitive types as 1st argument, got LIST diff --git a/ql/src/test/results/clientnegative/udf_trunc_wrong_type2.q.out b/ql/src/test/results/clientnegative/udf_trunc_wrong_type2.q.out new file mode 100644 index 000000000000..65097f1bf7db --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_trunc_wrong_type2.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 1:27 Argument type mismatch ''MM'': trunc only takes primitive types as 2nd argument, got LIST diff --git a/ql/src/test/results/clientnegative/udf_tumbling_window_check_wrong_type1.q.out b/ql/src/test/results/clientnegative/udf_tumbling_window_check_wrong_type1.q.out new file mode 100644 index 000000000000..f22b44f7b435 --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_tumbling_window_check_wrong_type1.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 1:23 Argument type mismatch ''2020-03-01 06:03:00'': tumbling_window only takes primitive types as 1st argument, got LIST diff --git a/ql/src/test/results/clientnegative/udf_tumbling_window_check_wrong_type2.q.out b/ql/src/test/results/clientnegative/udf_tumbling_window_check_wrong_type2.q.out new file mode 100644 index 000000000000..c13ec561a2a5 --- /dev/null +++ b/ql/src/test/results/clientnegative/udf_tumbling_window_check_wrong_type2.q.out @@ -0,0 +1 @@ +FAILED: SemanticException [Error 10016]: Line 1:65 Argument type mismatch ''5'': tumbling_window only takes primitive types as 2nd argument, got LIST diff --git a/ql/src/test/results/clientnegative/udtf_stack_not_constant.q.out b/ql/src/test/results/clientnegative/udtf_stack_not_constant.q.out new file mode 100644 index 000000000000..86b264a179de --- /dev/null +++ b/ql/src/test/results/clientnegative/udtf_stack_not_constant.q.out @@ -0,0 +1 @@ +FAILED: UDFArgumentException The first argument to STACK() must be a constant. diff --git a/ql/src/test/results/clientnegative/udtf_stack_null.q.out b/ql/src/test/results/clientnegative/udtf_stack_null.q.out new file mode 100644 index 000000000000..854a2361c3a9 --- /dev/null +++ b/ql/src/test/results/clientnegative/udtf_stack_null.q.out @@ -0,0 +1 @@ +FAILED: UDFArgumentException The first argument of STACK() must not be null. diff --git a/ql/src/test/results/clientnegative/udtf_stack_wrong_type1.q.out b/ql/src/test/results/clientnegative/udtf_stack_wrong_type1.q.out new file mode 100644 index 000000000000..d18a726b9c17 --- /dev/null +++ b/ql/src/test/results/clientnegative/udtf_stack_wrong_type1.q.out @@ -0,0 +1 @@ +FAILED: UDFArgumentTypeException The first argument to STACK() must be a constant integer (got string instead). 
diff --git a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out index 8f3788d40fab..5bdf0edc2b44 100644 --- a/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out +++ b/ql/src/test/results/clientpositive/cbo_rp_auto_join1.q.out @@ -92,8 +92,10 @@ POSTHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### STAGE DEPENDENCIES: Stage-1 is a root stage - Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 + Stage-2 depends on stages: Stage-1, Stage-4 + Stage-3 depends on stages: Stage-2 + Stage-4 is a root stage + Stage-0 depends on stages: Stage-3 STAGE PLANS: Stage: Stage-1 @@ -112,49 +114,53 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() + bucketGroup: true keys: key (type: int) - mode: final + minReductionHashAggr: 0.99 + mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: bigint) - outputColumnNames: key, $f1 + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: key (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: key (type: int) - Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: $f1 (type: bigint) + value expressions: _col1 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: key, $f1 + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: TableScan - alias: subq1:b - filterExpr: key is not null (type: boolean) - Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: key is not null (type: boolean) - Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) - outputColumnNames: key - Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - keys: key (type: int) - mode: final - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: bigint) - outputColumnNames: key, $f1 - Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: key (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: key (type: int) - Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: $f1 (type: bigint) + Reduce Output Operator + key expressions: key (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: key (type: 
int) + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: $f1 (type: bigint) + TableScan + Reduce Output Operator + key expressions: key (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: $f1 (type: bigint) Reduce Operator Tree: Join Operator condition map: @@ -181,7 +187,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Stage: Stage-2 + Stage: Stage-3 Map Reduce Map Operator Tree: TableScan @@ -205,6 +211,50 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Stage: Stage-4 + Map Reduce + Map Operator Tree: + TableScan + alias: subq1:b + filterExpr: key is not null (type: boolean) + Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: int) + outputColumnNames: key + Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count() + bucketGroup: true + keys: key (type: int) + minReductionHashAggr: 0.99 + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: key, $f1 + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + Stage: Stage-0 Fetch Operator limit: -1 @@ -255,7 +305,8 @@ POSTHOOK: Input: default@tbl2_n12 STAGE DEPENDENCIES: Stage-1 is a root stage Stage-2 depends on stages: Stage-1 - Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-2 + Stage-0 depends on stages: Stage-3 STAGE PLANS: Stage: Stage-1 @@ -273,20 +324,42 @@ STAGE PLANS: outputColumnNames: key Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator + bucketGroup: true keys: key (type: int) - mode: final + minReductionHashAggr: 0.99 + mode: hash outputColumnNames: _col0 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int) - outputColumnNames: key + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: key (type: int) - null sort order: z - sort order: + - Map-reduce 
partition columns: key (type: int) - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: key + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: key (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE TableScan alias: subq2:subq1:b filterExpr: key is not null (type: boolean) @@ -325,7 +398,7 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Stage: Stage-2 + Stage: Stage-3 Map Reduce Map Operator Tree: TableScan @@ -421,16 +494,20 @@ POSTHOOK: Input: default@tbl1_n13 POSTHOOK: Input: default@tbl2_n12 #### A masked pattern was here #### STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1, Stage-3, Stage-5 Stage-3 is a root stage - Stage-1 depends on stages: Stage-3 - Stage-0 depends on stages: Stage-1 + Stage-4 is a root stage + Stage-5 depends on stages: Stage-4, Stage-6 + Stage-6 is a root stage + Stage-0 depends on stages: Stage-2 STAGE PLANS: - Stage: Stage-3 + Stage: Stage-1 Map Reduce Map Operator Tree: TableScan - alias: src1:subq1:a + alias: src2:subq2:a filterExpr: key is not null (type: boolean) Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator @@ -442,23 +519,89 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() + bucketGroup: true keys: key (type: int) - mode: final + minReductionHashAggr: 0.99 + mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: bigint) - outputColumnNames: key, $f1 + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: key (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: key (type: int) - Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: $f1 (type: bigint) + value expressions: _col1 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: key, $f1 + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map 
Operator Tree: TableScan - alias: src1:subq1:b + Reduce Output Operator + key expressions: key (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: $f1 (type: bigint) + TableScan + Reduce Output Operator + key expressions: key (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: $f1 (type: bigint) + TableScan + Reduce Output Operator + key expressions: key (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 6 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: $f1 (type: bigint), $f10 (type: bigint) + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + Inner Join 0 to 2 + keys: + 0 key (type: int) + 1 key (type: int) + 2 key (type: int) + outputColumnNames: key, $f1, $f10, $f11, $f100 + Statistics: Num rows: 6 Data size: 216 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: int), ($f11 * $f100) (type: bigint), ($f1 * $f10) (type: bigint) + outputColumnNames: key, cnt1, cnt11 + Statistics: Num rows: 6 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 6 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: src2:subq2:b filterExpr: key is not null (type: boolean) Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator @@ -470,30 +613,27 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() + bucketGroup: true keys: key (type: int) - mode: final + minReductionHashAggr: 0.99 + mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: bigint) - outputColumnNames: key, $f1 + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: key (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: key (type: int) - Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: $f1 (type: bigint) + value expressions: _col1 (type: bigint) + Execution mode: vectorized Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 key (type: int) - 1 key (type: int) - outputColumnNames: key, $f1, $f10 - Statistics: Num rows: 6 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: key, $f1 + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: 
false table: @@ -501,11 +641,11 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe - Stage: Stage-1 + Stage: Stage-4 Map Reduce Map Operator Tree: TableScan - alias: src2:subq2:a + alias: src1:subq1:a filterExpr: key is not null (type: boolean) Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator @@ -517,23 +657,74 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() + bucketGroup: true keys: key (type: int) - mode: final + minReductionHashAggr: 0.99 + mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: bigint) - outputColumnNames: key, $f1 + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: key (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: key (type: int) - Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: $f1 (type: bigint) + value expressions: _col1 (type: bigint) + Execution mode: vectorized + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: key, $f1 + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-5 + Map Reduce + Map Operator Tree: TableScan - alias: src2:subq2:b + Reduce Output Operator + key expressions: key (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: $f1 (type: bigint) + TableScan + Reduce Output Operator + key expressions: key (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: $f1 (type: bigint) + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: key, $f1, $f10 + Statistics: Num rows: 6 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-6 + Map Reduce + Map Operator Tree: + TableScan + alias: src1:subq1:b filterExpr: key is not null (type: boolean) Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator @@ -545,51 +736,33 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() + bucketGroup: true 
keys: key (type: int) - mode: final + minReductionHashAggr: 0.99 + mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col1 (type: bigint) - outputColumnNames: key, $f1 + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: key (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: key (type: int) - Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: $f1 (type: bigint) - TableScan - Reduce Output Operator - key expressions: key (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: key (type: int) - Statistics: Num rows: 6 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: $f1 (type: bigint), $f10 (type: bigint) + value expressions: _col1 (type: bigint) + Execution mode: vectorized Reduce Operator Tree: - Join Operator - condition map: - Inner Join 0 to 1 - Inner Join 0 to 2 - keys: - 0 key (type: int) - 1 key (type: int) - 2 key (type: int) - outputColumnNames: key, $f1, $f10, $f11, $f100 - Statistics: Num rows: 6 Data size: 216 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int), ($f11 * $f100) (type: bigint), ($f1 * $f10) (type: bigint) - outputColumnNames: key, cnt1, cnt11 - Statistics: Num rows: 6 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 6 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: key, $f1 + Statistics: Num rows: 6 Data size: 72 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe Stage: Stage-0 Fetch Operator diff --git a/ql/src/test/results/clientpositive/llap/allcolref_in_udf.q.out b/ql/src/test/results/clientpositive/llap/allcolref_in_udf.q.out index 8e20aaa35d86..236d52a2ded5 100644 --- a/ql/src/test/results/clientpositive/llap/allcolref_in_udf.q.out +++ b/ql/src/test/results/clientpositive/llap/allcolref_in_udf.q.out @@ -168,15 +168,15 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### 4val_45val_5 4val_4 5val_5 -4val_45 NULL 5val_5 +4val_45 45val_5 NULL 4val_45val_5 4val_4 5val_5 -4val_45 NULL 5val_5 +4val_45 45val_5 NULL 4val_45val_5 4val_4 5val_5 -4val_45 NULL 5val_5 +4val_45 45val_5 NULL 8val_89val_9 8val_8 9val_9 -8val_89 NULL 9val_9 +8val_89 89val_9 NULL 9val_910val_10 9val_9 10val_10 -9val_910 NULL 10val_10 +9val_910 910val_10 NULL PREHOOK: query: create table allcolref as select array(key, value) from src PREHOOK: type: CREATETABLE_AS_SELECT PREHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/llap/analyze_npe.q.out 
b/ql/src/test/results/clientpositive/llap/analyze_npe.q.out index df9097d44cc3..c63f23c4fb42 100644 --- a/ql/src/test/results/clientpositive/llap/analyze_npe.q.out +++ b/ql/src/test/results/clientpositive/llap/analyze_npe.q.out @@ -114,6 +114,7 @@ STAGE PLANS: Filter Operator predicate: c1 is null (type: boolean) Select Operator + expressions: null (type: struct) outputColumnNames: _col0 ListSink @@ -139,7 +140,7 @@ STAGE PLANS: Filter Operator predicate: c1 is null (type: boolean) Select Operator - expressions: null (type: void) + expressions: Const map null (type: map) outputColumnNames: _col0 ListSink @@ -165,7 +166,7 @@ STAGE PLANS: Filter Operator predicate: c1 is null (type: boolean) Select Operator - expressions: null (type: void) + expressions: Const array null (type: array) outputColumnNames: _col0 ListSink @@ -191,7 +192,7 @@ STAGE PLANS: Filter Operator predicate: c1 is null (type: boolean) Select Operator - expressions: null (type: void) + expressions: null (type: struct) outputColumnNames: _col0 ListSink diff --git a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_10.q.out b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_10.q.out index 7b93c1d6a54f..35538b911745 100644 --- a/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_10.q.out +++ b/ql/src/test/results/clientpositive/llap/auto_sortmerge_join_10.q.out @@ -372,7 +372,9 @@ STAGE PLANS: Tez #### A masked pattern was here #### Edges: - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Map 5 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE) + Reducer 4 <- Reducer 3 (CUSTOM_SIMPLE_EDGE) #### A masked pattern was here #### Vertices: Map 1 @@ -384,11 +386,22 @@ STAGE PLANS: Filter Operator predicate: (key < 6) (type: boolean) Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) + Group By Operator + bucketGroup: true + keys: key (type: int) + minReductionHashAggr: 0.4 + mode: hash outputColumnNames: _col0 - Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE - Dummy Store + Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized, llap + LLAP IO: all inputs + Map 5 Map Operator Tree: TableScan alias: a @@ -397,31 +410,54 @@ STAGE PLANS: Filter Operator predicate: (key < 6) (type: boolean) Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - keys: key (type: int) - mode: final + Select Operator + expressions: key (type: int) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - Statistics: Num rows: 7 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count() - minReductionHashAggr: 0.85714287 - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Execution mode: 
llap + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized, llap + LLAP IO: all inputs Reducer 2 + Execution mode: vectorized, llap + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Reducer 3 + Execution mode: llap + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + Statistics: Num rows: 7 Data size: 56 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count() + minReductionHashAggr: 0.85714287 + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) + Reducer 4 Execution mode: vectorized, llap Reduce Operator Tree: Group By Operator @@ -492,6 +528,8 @@ STAGE PLANS: #### A masked pattern was here #### Edges: Reducer 2 <- Map 1 (SIMPLE_EDGE) + Reducer 3 <- Map 5 (SIMPLE_EDGE), Reducer 2 (SIMPLE_EDGE) + Reducer 4 <- Reducer 3 (SIMPLE_EDGE) #### A masked pattern was here #### Vertices: Map 1 @@ -503,11 +541,22 @@ STAGE PLANS: Filter Operator predicate: (key < 6) (type: boolean) Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: key (type: int) + Group By Operator + bucketGroup: true + keys: key (type: int) + minReductionHashAggr: 0.4 + mode: hash outputColumnNames: _col0 - Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE - Dummy Store + Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized, llap + LLAP IO: all inputs + Map 5 Map Operator Tree: TableScan alias: a @@ -516,30 +565,53 @@ STAGE PLANS: Filter Operator predicate: (key < 6) (type: boolean) Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - keys: key (type: int) - mode: final + Select Operator + expressions: key (type: int) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col1 + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column 
stats: COMPLETE - Select Operator - expressions: _col1 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - null sort order: z - sort order: + - Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE - Execution mode: llap + Execution mode: vectorized, llap + LLAP IO: all inputs Reducer 2 + Execution mode: vectorized, llap + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Reducer 3 + Execution mode: llap + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col1 + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col1 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE + Reducer 4 Execution mode: vectorized, llap Reduce Operator Tree: Select Operator @@ -717,10 +789,29 @@ STAGE PLANS: Tez #### A masked pattern was here #### Edges: - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 5 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) + Reducer 5 <- Map 4 (SIMPLE_EDGE) #### A masked pattern was here #### Vertices: Map 1 + Map Operator Tree: + TableScan + alias: t1 + Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized, llap + LLAP IO: all inputs + Map 4 Map Operator Tree: TableScan alias: t2 @@ -730,51 +821,50 @@ STAGE PLANS: predicate: key is not null (type: boolean) Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator + bucketGroup: true keys: key (type: int) - mode: final + minReductionHashAggr: 0.4 + mode: hash outputColumnNames: _col0 Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: true (type: boolean), _col0 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE - Dummy Store - Map Operator Tree: - TableScan - alias: t1 - Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + 
Execution mode: vectorized, llap + LLAP IO: all inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Outer Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col1 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: _col1 is null (type: boolean) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: key (type: int) + expressions: _col0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 10 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE - Merge Join Operator - condition map: - Left Outer Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col1 (type: int) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 10 Data size: 80 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: _col1 is null (type: boolean) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count(_col0) + minReductionHashAggr: 0.4 + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: count(_col0) - minReductionHashAggr: 0.4 - mode: hash - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: bigint) - Execution mode: llap - Reducer 2 + value expressions: _col0 (type: bigint) + Reducer 3 Execution mode: vectorized, llap Reduce Operator Tree: Group By Operator @@ -789,6 +879,25 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 5 + Execution mode: vectorized, llap + Reduce Operator Tree: + Group By Operator + keys: KEY._col0 (type: int) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: true (type: boolean), _col0 (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col1 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col1 (type: int) + Statistics: Num rows: 6 Data size: 48 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: boolean) Stage: Stage-0 Fetch Operator diff --git a/ql/src/test/results/clientpositive/llap/bucket_decimal_col_select.q.out b/ql/src/test/results/clientpositive/llap/bucket_decimal_col_select.q.out new file mode 100644 index 000000000000..caa46d38e4be --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/bucket_decimal_col_select.q.out @@ -0,0 +1,27 @@ +PREHOOK: query: create table bucket_table(id decimal(38,0), name string) clustered by(id) into 3 buckets +PREHOOK: type: CREATETABLE 
+PREHOOK: Output: database:default +PREHOOK: Output: default@bucket_table +POSTHOOK: query: create table bucket_table(id decimal(38,0), name string) clustered by(id) into 3 buckets +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@bucket_table +PREHOOK: query: insert into bucket_table values(5000000000000999640711, 'Cloud') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@bucket_table +POSTHOOK: query: insert into bucket_table values(5000000000000999640711, 'Cloud') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@bucket_table +POSTHOOK: Lineage: bucket_table.id SCRIPT [] +POSTHOOK: Lineage: bucket_table.name SCRIPT [] +PREHOOK: query: select * from bucket_table bt where id = 5000000000000999640711 +PREHOOK: type: QUERY +PREHOOK: Input: default@bucket_table +#### A masked pattern was here #### +POSTHOOK: query: select * from bucket_table bt where id = 5000000000000999640711 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@bucket_table +#### A masked pattern was here #### +5000000000000999640711 Cloud diff --git a/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out b/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out index e0a6a183faa6..f85c3bbf22c9 100644 --- a/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out +++ b/ql/src/test/results/clientpositive/llap/bucket_groupby.q.out @@ -1512,30 +1512,79 @@ POSTHOOK: Input: default@clustergroupby POSTHOOK: Input: default@clustergroupby@ds=102 #### A masked pattern was here #### STAGE DEPENDENCIES: - Stage-0 is a root stage + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: clustergroupby + filterExpr: (ds = '102') (type: boolean) + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Top N Key Operator + sort order: ++ + keys: key (type: string), value (type: string) + null sort order: zz + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + top n: 10 + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: key, value + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count() + bucketGroup: true + keys: key (type: string), value (type: string) + minReductionHashAggr: 0.4 + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 316 Data size: 58776 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + null sort order: zz + sort order: ++ + Map-reduce partition columns: _col0 (type: string), _col1 (type: string) + Statistics: Num rows: 316 Data size: 58776 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col2 (type: bigint) + Execution mode: llap + LLAP IO: all inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 316 Data size: 58776 Basic stats: COMPLETE Column stats: COMPLETE + Limit + Number of rows: 10 + Statistics: Num rows: 10 Data size: 1860 Basic 
stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: string), _col2 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 10 Data size: 950 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 10 Data size: 950 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Stage: Stage-0 Fetch Operator limit: 10 Processor Tree: - TableScan - alias: clustergroupby - filterExpr: (ds = '102') (type: boolean) - Select Operator - expressions: key (type: string), value (type: string) - outputColumnNames: key, value - Group By Operator - aggregations: count() - keys: key (type: string), value (type: string) - mode: final - outputColumnNames: _col0, _col1, _col2 - Limit - Number of rows: 10 - Select Operator - expressions: _col0 (type: string), _col2 (type: bigint) - outputColumnNames: _col0, _col1 - ListSink + ListSink PREHOOK: query: select key, count(1) from clustergroupby where ds='102' group by key, value limit 10 PREHOOK: type: QUERY @@ -1547,7 +1596,6 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@clustergroupby POSTHOOK: Input: default@clustergroupby@ds=102 #### A masked pattern was here #### -0 3 10 1 100 2 103 2 @@ -1555,8 +1603,9 @@ POSTHOOK: Input: default@clustergroupby@ds=102 105 1 11 1 111 1 -113 2 114 1 +0 3 +113 2 PREHOOK: query: drop table clustergroupby PREHOOK: type: DROPTABLE PREHOOK: Input: default@clustergroupby diff --git a/ql/src/test/results/clientpositive/llap/cast_null_to_complex.q.out b/ql/src/test/results/clientpositive/llap/cast_null_to_complex.q.out new file mode 100644 index 000000000000..d6db733df7ad --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/cast_null_to_complex.q.out @@ -0,0 +1,107 @@ +PREHOOK: query: explain cbo +select cast(null as map), cast(null as array>), cast(null as int), cast(null as struct>, f2:struct>) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: explain cbo +select cast(null as map), cast(null as array>), cast(null as int), cast(null as struct>, f2:struct>) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +Explain +CBO PLAN: +HiveProject(_o__c0=[null:(INTEGER, VARCHAR(2147483647) CHARACTER SET "UTF-16LE") MAP], _o__c1=[null:(INTEGER, VARCHAR(2147483647) CHARACTER SET "UTF-16LE") MAP ARRAY], _o__c2=[null:INTEGER], _o__c3=[null:RecordType((INTEGER, VARCHAR(2147483647) CHARACTER SET "UTF-16LE") MAP ARRAY f1, RecordType(DOUBLE a, VARCHAR(2147483647) CHARACTER SET "UTF-16LE" b) f2)]) + HiveTableScan(table=[[_dummy_database, _dummy_table]], table:alias=[_dummy_table]) + +PREHOOK: query: explain +select cast(null as map), cast(null as array>), cast(null as int), cast(null as struct>, f2:struct>) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: explain +select cast(null as map), cast(null as array>), cast(null as int), cast(null as struct>, f2:struct>) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +Explain +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: _dummy_table + Row Limit 
Per Split: 1 + Select Operator + expressions: Const map null (type: map), Const array> null (type: array>), null (type: int), null (type: struct>,f2:struct>) + outputColumnNames: _col0, _col1, _col2, _col3 + ListSink + +PREHOOK: query: select cast(null as map), cast(null as array>), cast(null as int), cast(null as struct>, f2:struct>) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select cast(null as map), cast(null as array>), cast(null as int), cast(null as struct>, f2:struct>) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +_c0 _c1 _c2 _c3 +NULL NULL NULL NULL +PREHOOK: query: create table t1 as +select cast(null as map), cast(null as array>), cast(null as int), cast(null as struct>, f2:struct>) +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 +POSTHOOK: query: create table t1 as +select cast(null as map), cast(null as array>), cast(null as int), cast(null as struct>, f2:struct>) +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t1 +POSTHOOK: Lineage: t1._c0 SIMPLE [] +POSTHOOK: Lineage: t1._c1 SIMPLE [] +POSTHOOK: Lineage: t1._c2 SIMPLE [] +POSTHOOK: Lineage: t1._c3 SIMPLE [] +_c0 _c1 _c2 _c3 +PREHOOK: query: describe formatted t1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@t1 +POSTHOOK: query: describe formatted t1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@t1 +col_name data_type comment +# col_name data_type comment +_c0 map +_c1 array> +_c2 int +_c3 struct>,f2:struct> + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 1 + numRows 1 + rawDataSize 11 + totalSize 12 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 diff --git a/ql/src/test/results/clientpositive/llap/cbo_constantfolding.q.out b/ql/src/test/results/clientpositive/llap/cbo_constantfolding.q.out new file mode 100644 index 000000000000..5e316937c46a --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/cbo_constantfolding.q.out @@ -0,0 +1,60 @@ +PREHOOK: query: EXPLAIN CBO SELECT DATE_ADD('2023-01-01', 1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN CBO SELECT DATE_ADD('2023-01-01', 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +CBO PLAN: +HiveProject(_o__c0=[CAST(2023-01-02:DATE):DATE]) + HiveTableScan(table=[[_dummy_database, _dummy_table]], table:alias=[_dummy_table]) + +PREHOOK: query: EXPLAIN CBO SELECT DATE_SUB('2023-01-01', 1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN CBO SELECT DATE_SUB('2023-01-01', 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern 
was here #### +CBO PLAN: +HiveProject(_o__c0=[CAST(2022-12-31:DATE):DATE]) + HiveTableScan(table=[[_dummy_database, _dummy_table]], table:alias=[_dummy_table]) + +PREHOOK: query: EXPLAIN CBO SELECT FROM_UNIXTIME(1672560000) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN CBO SELECT FROM_UNIXTIME(1672560000) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +CBO PLAN: +HiveProject(_o__c0=[CAST(_UTF-16LE'2023-01-01 00:00:00':VARCHAR(2147483647) CHARACTER SET "UTF-16LE"):VARCHAR(2147483647) CHARACTER SET "UTF-16LE"]) + HiveTableScan(table=[[_dummy_database, _dummy_table]], table:alias=[_dummy_table]) + +PREHOOK: query: EXPLAIN CBO SELECT TO_UNIX_TIMESTAMP(DATE '2023-01-01') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN CBO SELECT TO_UNIX_TIMESTAMP(DATE '2023-01-01') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +CBO PLAN: +HiveProject(_o__c0=[CAST(1672560000:BIGINT):BIGINT]) + HiveTableScan(table=[[_dummy_database, _dummy_table]], table:alias=[_dummy_table]) + +PREHOOK: query: EXPLAIN CBO SELECT UNIX_TIMESTAMP(DATE '2023-01-01') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN CBO SELECT UNIX_TIMESTAMP(DATE '2023-01-01') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +CBO PLAN: +HiveProject(_o__c0=[CAST(1672560000:BIGINT):BIGINT]) + HiveTableScan(table=[[_dummy_database, _dummy_table]], table:alias=[_dummy_table]) + diff --git a/ql/src/test/results/clientpositive/llap/complex_datatype.q.out b/ql/src/test/results/clientpositive/llap/complex_datatype.q.out new file mode 100644 index 000000000000..727e185642c1 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/complex_datatype.q.out @@ -0,0 +1,668 @@ +PREHOOK: query: create EXTERNAL table `complex_map_array_table` as +select +'bob' as name, + MAP( + "Key1", + ARRAY( + 1, + 2, + 3 + ), + "Key2", + ARRAY( + 4, + 5, + 6 + ) + ) as column2 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: database:default +PREHOOK: Output: default@complex_map_array_table +POSTHOOK: query: create EXTERNAL table `complex_map_array_table` as +select +'bob' as name, + MAP( + "Key1", + ARRAY( + 1, + 2, + 3 + ), + "Key2", + ARRAY( + 4, + 5, + 6 + ) + ) as column2 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: database:default +POSTHOOK: Output: default@complex_map_array_table +POSTHOOK: Lineage: complex_map_array_table.column2 EXPRESSION [] +POSTHOOK: Lineage: complex_map_array_table.name SIMPLE [] +PREHOOK: query: create EXTERNAL table `complex_map_struct_table` as +select +'bob' as name, +MAP( + "Map_Key1", + named_struct( + 'Id', + 'Id_Value1', + 'Name', + 'Name_Value1' + ), + "Map_Key2", + named_struct( + 'Id', + 'Id_Value2', + 'Name', + 'Name_Value2' + ) + ) as column2 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: database:default +PREHOOK: Output: default@complex_map_struct_table +POSTHOOK: query: create EXTERNAL table `complex_map_struct_table` as +select +'bob' as name, +MAP( + "Map_Key1", + named_struct( + 'Id', + 'Id_Value1', + 'Name', + 'Name_Value1' + ), + "Map_Key2", 
+ named_struct( + 'Id', + 'Id_Value2', + 'Name', + 'Name_Value2' + ) + ) as column2 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: database:default +POSTHOOK: Output: default@complex_map_struct_table +POSTHOOK: Lineage: complex_map_struct_table.column2 EXPRESSION [] +POSTHOOK: Lineage: complex_map_struct_table.name SIMPLE [] +PREHOOK: query: create EXTERNAL table `complex_table1` as +select +MAP( + "Key1", + ARRAY( + 1, + 2, + 3 + ), + "Key2", + ARRAY( + 4, + 5, + 6 + ) + ) as column1, +'bob' as name, +MAP( + "Map_Key1", + named_struct( + 'Id', + 'Id_Value1', + 'Name', + 'Name_Value1' + ), + "Map_Key2", + named_struct( + 'Id', + 'Id_Value2', + 'Name', + 'Name_Value2' + ) + ) as column3 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: database:default +PREHOOK: Output: default@complex_table1 +POSTHOOK: query: create EXTERNAL table `complex_table1` as +select +MAP( + "Key1", + ARRAY( + 1, + 2, + 3 + ), + "Key2", + ARRAY( + 4, + 5, + 6 + ) + ) as column1, +'bob' as name, +MAP( + "Map_Key1", + named_struct( + 'Id', + 'Id_Value1', + 'Name', + 'Name_Value1' + ), + "Map_Key2", + named_struct( + 'Id', + 'Id_Value2', + 'Name', + 'Name_Value2' + ) + ) as column3 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: database:default +POSTHOOK: Output: default@complex_table1 +POSTHOOK: Lineage: complex_table1.column1 EXPRESSION [] +POSTHOOK: Lineage: complex_table1.column3 EXPRESSION [] +POSTHOOK: Lineage: complex_table1.name SIMPLE [] +PREHOOK: query: create EXTERNAL table `complex_table2` as +select +MAP( + "Key1", + ARRAY( + 1, + 2, + 3 + ), + "Key2", + ARRAY( + 4, + 5, + 6 + ) + ) as column1, +MAP( + "Map_Key1", + named_struct( + 'Id', + 'Id_Value1', + 'Name', + 'Name_Value1' + ), + "Map_Key2", + named_struct( + 'Id', + 'Id_Value2', + 'Name', + 'Name_Value2' + ) + ) as column2 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: database:default +PREHOOK: Output: default@complex_table2 +POSTHOOK: query: create EXTERNAL table `complex_table2` as +select +MAP( + "Key1", + ARRAY( + 1, + 2, + 3 + ), + "Key2", + ARRAY( + 4, + 5, + 6 + ) + ) as column1, +MAP( + "Map_Key1", + named_struct( + 'Id', + 'Id_Value1', + 'Name', + 'Name_Value1' + ), + "Map_Key2", + named_struct( + 'Id', + 'Id_Value2', + 'Name', + 'Name_Value2' + ) + ) as column2 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: database:default +POSTHOOK: Output: default@complex_table2 +POSTHOOK: Lineage: complex_table2.column1 EXPRESSION [] +POSTHOOK: Lineage: complex_table2.column2 EXPRESSION [] +PREHOOK: query: create EXTERNAL table `complex_table3` as +select +MAP( + "Key1", + ARRAY( + 1, + 2, + 3 + ), + "Key2", + ARRAY( + 4, + 5, + 6 + ) + ) as column1, +MAP( + "Key3", + ARRAY( + 7, + 8, + 9 + ), + "Key4", + ARRAY( + 10, + 11, + 12 + ) + ) as column2 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: database:default +PREHOOK: Output: default@complex_table3 +POSTHOOK: query: create EXTERNAL table `complex_table3` as +select +MAP( + "Key1", + ARRAY( + 1, + 2, + 3 + ), + "Key2", + ARRAY( + 4, + 5, + 6 + ) + ) as column1, +MAP( + "Key3", + ARRAY( + 7, + 8, + 9 + ), + "Key4", + ARRAY( + 10, + 11, + 12 + ) + ) as column2 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: 
database:default +POSTHOOK: Output: default@complex_table3 +POSTHOOK: Lineage: complex_table3.column1 EXPRESSION [] +POSTHOOK: Lineage: complex_table3.column2 EXPRESSION [] +PREHOOK: query: create EXTERNAL table `complex_array_map_table` as +select +'bob' as name, +ARRAY( + MAP( + "Key1", + "Value1" + ), + MAP( + "Key2", + "Value2" + ) + ) as column2 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: database:default +PREHOOK: Output: default@complex_array_map_table +POSTHOOK: query: create EXTERNAL table `complex_array_map_table` as +select +'bob' as name, +ARRAY( + MAP( + "Key1", + "Value1" + ), + MAP( + "Key2", + "Value2" + ) + ) as column2 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: database:default +POSTHOOK: Output: default@complex_array_map_table +POSTHOOK: Lineage: complex_array_map_table.column2 EXPRESSION [] +POSTHOOK: Lineage: complex_array_map_table.name SIMPLE [] +PREHOOK: query: create EXTERNAL table `complex_map_map_table` as +select + 'bob' as name, + MAP( + "Key1", + MAP( + 1, + 2 + ), + "Key2", + MAP( + 3, + 4 + ) + ) as column2 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: database:default +PREHOOK: Output: default@complex_map_map_table +POSTHOOK: query: create EXTERNAL table `complex_map_map_table` as +select + 'bob' as name, + MAP( + "Key1", + MAP( + 1, + 2 + ), + "Key2", + MAP( + 3, + 4 + ) + ) as column2 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: database:default +POSTHOOK: Output: default@complex_map_map_table +POSTHOOK: Lineage: complex_map_map_table.column2 EXPRESSION [] +POSTHOOK: Lineage: complex_map_map_table.name SIMPLE [] +PREHOOK: query: create EXTERNAL table `complex_combined_table` as +select + ARRAY('arr_val1', 'arr_val2', 'arr_val3') as column1, + 'bob' as column2, + MAP( + "Key1", + ARRAY( + 1, + 2, + 3 + ), + "Key2", + ARRAY( + 4, + 5, + 6 + ) + ) as column3, + NAMED_STRUCT('abc', '7', 'def', '8') as column4, + MAP( + "Key3", + "Value3", + "Key4", + "Value4" + ) as column5 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: database:default +PREHOOK: Output: default@complex_combined_table +POSTHOOK: query: create EXTERNAL table `complex_combined_table` as +select + ARRAY('arr_val1', 'arr_val2', 'arr_val3') as column1, + 'bob' as column2, + MAP( + "Key1", + ARRAY( + 1, + 2, + 3 + ), + "Key2", + ARRAY( + 4, + 5, + 6 + ) + ) as column3, + NAMED_STRUCT('abc', '7', 'def', '8') as column4, + MAP( + "Key3", + "Value3", + "Key4", + "Value4" + ) as column5 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: database:default +POSTHOOK: Output: default@complex_combined_table +POSTHOOK: Lineage: complex_combined_table.column1 EXPRESSION [] +POSTHOOK: Lineage: complex_combined_table.column2 SIMPLE [] +POSTHOOK: Lineage: complex_combined_table.column3 EXPRESSION [] +POSTHOOK: Lineage: complex_combined_table.column4 EXPRESSION [] +POSTHOOK: Lineage: complex_combined_table.column5 EXPRESSION [] +PREHOOK: query: select * from complex_map_array_table +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_map_array_table +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_map_array_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_map_array_table +POSTHOOK: Output: hdfs://### HDFS PATH ### +bob 
{"Key1":[1,2,3],"Key2":[4,5,6]} +PREHOOK: query: select * from complex_map_struct_table +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_map_struct_table +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_map_struct_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_map_struct_table +POSTHOOK: Output: hdfs://### HDFS PATH ### +bob {"Map_Key1":{"id":"Id_Value1","name":"Name_Value1"},"Map_Key2":{"id":"Id_Value2","name":"Name_Value2"}} +PREHOOK: query: select * from complex_table1 +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_table1 +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_table1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_table1 +POSTHOOK: Output: hdfs://### HDFS PATH ### +{"Key1":[1,2,3],"Key2":[4,5,6]} bob {"Map_Key1":{"id":"Id_Value1","name":"Name_Value1"},"Map_Key2":{"id":"Id_Value2","name":"Name_Value2"}} +PREHOOK: query: select * from complex_table2 +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_table2 +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_table2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_table2 +POSTHOOK: Output: hdfs://### HDFS PATH ### +{"Key1":[1,2,3],"Key2":[4,5,6]} {"Map_Key1":{"id":"Id_Value1","name":"Name_Value1"},"Map_Key2":{"id":"Id_Value2","name":"Name_Value2"}} +PREHOOK: query: select * from complex_table3 +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_table3 +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_table3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_table3 +POSTHOOK: Output: hdfs://### HDFS PATH ### +{"Key1":[1,2,3],"Key2":[4,5,6]} {"Key3":[7,8,9],"Key4":[10,11,12]} +PREHOOK: query: select * from complex_array_map_table +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_array_map_table +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_array_map_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_array_map_table +POSTHOOK: Output: hdfs://### HDFS PATH ### +bob [{"Key1":"Value1"},{"Key2":"Value2"}] +PREHOOK: query: select * from complex_map_map_table +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_map_map_table +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_map_map_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_map_map_table +POSTHOOK: Output: hdfs://### HDFS PATH ### +bob {"Key1":{1:2},"Key2":{3:4}} +PREHOOK: query: select * from complex_combined_table +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_combined_table +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_combined_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_combined_table +POSTHOOK: Output: hdfs://### HDFS PATH ### +["arr_val1","arr_val2","arr_val3"] bob {"Key1":[1,2,3],"Key2":[4,5,6]} {"abc":"7","def":"8"} {"Key3":"Value3","Key4":"Value4"} +PREHOOK: query: select * from complex_map_array_table +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_map_array_table +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_map_array_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_map_array_table +POSTHOOK: Output: hdfs://### HDFS PATH ### +bob {"Key1":[1,2,3],"Key2":[4,5,6]} +PREHOOK: query: select * from complex_map_struct_table +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_map_struct_table +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from 
complex_map_struct_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_map_struct_table +POSTHOOK: Output: hdfs://### HDFS PATH ### +bob {"Map_Key1":{"id":"Id_Value1","name":"Name_Value1"},"Map_Key2":{"id":"Id_Value2","name":"Name_Value2"}} +PREHOOK: query: select * from complex_table1 +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_table1 +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_table1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_table1 +POSTHOOK: Output: hdfs://### HDFS PATH ### +{"Key1":[1,2,3],"Key2":[4,5,6]} bob {"Map_Key1":{"id":"Id_Value1","name":"Name_Value1"},"Map_Key2":{"id":"Id_Value2","name":"Name_Value2"}} +PREHOOK: query: select * from complex_table2 +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_table2 +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_table2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_table2 +POSTHOOK: Output: hdfs://### HDFS PATH ### +{"Key1":[1,2,3],"Key2":[4,5,6]} {"Map_Key1":{"id":"Id_Value1","name":"Name_Value1"},"Map_Key2":{"id":"Id_Value2","name":"Name_Value2"}} +PREHOOK: query: select * from complex_table3 +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_table3 +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_table3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_table3 +POSTHOOK: Output: hdfs://### HDFS PATH ### +{"Key1":[1,2,3],"Key2":[4,5,6]} {"Key3":[7,8,9],"Key4":[10,11,12]} +PREHOOK: query: select * from complex_array_map_table +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_array_map_table +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_array_map_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_array_map_table +POSTHOOK: Output: hdfs://### HDFS PATH ### +bob [{"Key1":"Value1"},{"Key2":"Value2"}] +PREHOOK: query: select * from complex_map_map_table +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_map_map_table +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_map_map_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_map_map_table +POSTHOOK: Output: hdfs://### HDFS PATH ### +bob {"Key1":{1:2},"Key2":{3:4}} +PREHOOK: query: select * from complex_combined_table +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_combined_table +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_combined_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_combined_table +POSTHOOK: Output: hdfs://### HDFS PATH ### +["arr_val1","arr_val2","arr_val3"] bob {"Key1":[1,2,3],"Key2":[4,5,6]} {"abc":"7","def":"8"} {"Key3":"Value3","Key4":"Value4"} +PREHOOK: query: select * from complex_map_array_table +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_map_array_table +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_map_array_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_map_array_table +POSTHOOK: Output: hdfs://### HDFS PATH ### +bob {"Key1":[1,2,3],"Key2":[4,5,6]} +PREHOOK: query: select * from complex_map_struct_table +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_map_struct_table +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_map_struct_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_map_struct_table +POSTHOOK: Output: hdfs://### HDFS PATH ### +bob 
{"Map_Key1":{"id":"Id_Value1","name":"Name_Value1"},"Map_Key2":{"id":"Id_Value2","name":"Name_Value2"}} +PREHOOK: query: select * from complex_table1 +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_table1 +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_table1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_table1 +POSTHOOK: Output: hdfs://### HDFS PATH ### +{"Key1":[1,2,3],"Key2":[4,5,6]} bob {"Map_Key1":{"id":"Id_Value1","name":"Name_Value1"},"Map_Key2":{"id":"Id_Value2","name":"Name_Value2"}} +PREHOOK: query: select * from complex_table2 +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_table2 +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_table2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_table2 +POSTHOOK: Output: hdfs://### HDFS PATH ### +{"Key1":[1,2,3],"Key2":[4,5,6]} {"Map_Key1":{"id":"Id_Value1","name":"Name_Value1"},"Map_Key2":{"id":"Id_Value2","name":"Name_Value2"}} +PREHOOK: query: select * from complex_table3 +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_table3 +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_table3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_table3 +POSTHOOK: Output: hdfs://### HDFS PATH ### +{"Key1":[1,2,3],"Key2":[4,5,6]} {"Key3":[7,8,9],"Key4":[10,11,12]} +PREHOOK: query: select * from complex_array_map_table +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_array_map_table +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_array_map_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_array_map_table +POSTHOOK: Output: hdfs://### HDFS PATH ### +bob [{"Key1":"Value1"},{"Key2":"Value2"}] +PREHOOK: query: select * from complex_map_map_table +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_map_map_table +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_map_map_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_map_map_table +POSTHOOK: Output: hdfs://### HDFS PATH ### +bob {"Key1":{1:2},"Key2":{3:4}} +PREHOOK: query: select * from complex_combined_table +PREHOOK: type: QUERY +PREHOOK: Input: default@complex_combined_table +PREHOOK: Output: hdfs://### HDFS PATH ### +POSTHOOK: query: select * from complex_combined_table +POSTHOOK: type: QUERY +POSTHOOK: Input: default@complex_combined_table +POSTHOOK: Output: hdfs://### HDFS PATH ### +["arr_val1","arr_val2","arr_val3"] bob {"Key1":[1,2,3],"Key2":[4,5,6]} {"abc":"7","def":"8"} {"Key3":"Value3","Key4":"Value4"} diff --git a/ql/src/test/results/clientpositive/llap/compute_stats_binary.q.out b/ql/src/test/results/clientpositive/llap/compute_stats_binary.q.out deleted file mode 100644 index fc90c894cd95..000000000000 --- a/ql/src/test/results/clientpositive/llap/compute_stats_binary.q.out +++ /dev/null @@ -1,34 +0,0 @@ -PREHOOK: query: create table tab_binary(a binary) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tab_binary -POSTHOOK: query: create table tab_binary(a binary) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tab_binary -PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/binary.txt" INTO TABLE tab_binary -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@tab_binary -POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/binary.txt" INTO TABLE tab_binary -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: 
Output: default@tab_binary -PREHOOK: query: select count(*) from tab_binary -PREHOOK: type: QUERY -PREHOOK: Input: default@tab_binary -#### A masked pattern was here #### -POSTHOOK: query: select count(*) from tab_binary -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab_binary -#### A masked pattern was here #### -10 -PREHOOK: query: select compute_stats(a, 16) from tab_binary -PREHOOK: type: QUERY -PREHOOK: Input: default@tab_binary -#### A masked pattern was here #### -POSTHOOK: query: select compute_stats(a, 16) from tab_binary -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab_binary -#### A masked pattern was here #### -{"columntype":"Binary","maxlength":58,"avglength":32.5,"countnulls":0} diff --git a/ql/src/test/results/clientpositive/llap/compute_stats_boolean.q.out b/ql/src/test/results/clientpositive/llap/compute_stats_boolean.q.out deleted file mode 100644 index be90ee222739..000000000000 --- a/ql/src/test/results/clientpositive/llap/compute_stats_boolean.q.out +++ /dev/null @@ -1,34 +0,0 @@ -PREHOOK: query: create table tab_bool(a boolean) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tab_bool -POSTHOOK: query: create table tab_bool(a boolean) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tab_bool -PREHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/bool.txt" INTO TABLE tab_bool -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@tab_bool -POSTHOOK: query: LOAD DATA LOCAL INPATH "../../data/files/bool.txt" INTO TABLE tab_bool -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@tab_bool -PREHOOK: query: select count(*) from tab_bool -PREHOOK: type: QUERY -PREHOOK: Input: default@tab_bool -#### A masked pattern was here #### -POSTHOOK: query: select count(*) from tab_bool -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab_bool -#### A masked pattern was here #### -33 -PREHOOK: query: select compute_stats(a, 16) from tab_bool -PREHOOK: type: QUERY -PREHOOK: Input: default@tab_bool -#### A masked pattern was here #### -POSTHOOK: query: select compute_stats(a, 16) from tab_bool -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab_bool -#### A masked pattern was here #### -{"columntype":"Boolean","counttrues":13,"countfalses":19,"countnulls":1} diff --git a/ql/src/test/results/clientpositive/llap/compute_stats_date.q.out b/ql/src/test/results/clientpositive/llap/compute_stats_date.q.out deleted file mode 100644 index b33be22d8581..000000000000 --- a/ql/src/test/results/clientpositive/llap/compute_stats_date.q.out +++ /dev/null @@ -1,176 +0,0 @@ -PREHOOK: query: create table tab_date ( - origin_city_name string, - dest_city_name string, - fl_date date, - arr_delay float, - fl_num int -) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tab_date -POSTHOOK: query: create table tab_date ( - origin_city_name string, - dest_city_name string, - fl_date date, - arr_delay float, - fl_num int -) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tab_date -PREHOOK: query: load data local inpath '../../data/files/flights_join.txt' overwrite into table tab_date -PREHOOK: type: LOAD -#### A masked pattern was here #### -PREHOOK: Output: default@tab_date -POSTHOOK: query: load data local inpath '../../data/files/flights_join.txt' overwrite into table tab_date -POSTHOOK: type: LOAD -#### A masked pattern was here #### -POSTHOOK: Output: default@tab_date 
-PREHOOK: query: select count(*) from tab_date -PREHOOK: type: QUERY -PREHOOK: Input: default@tab_date -#### A masked pattern was here #### -POSTHOOK: query: select count(*) from tab_date -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab_date -#### A masked pattern was here #### -20 -PREHOOK: query: select compute_stats(fl_date, 'hll') from tab_date -PREHOOK: type: QUERY -PREHOOK: Input: default@tab_date -#### A masked pattern was here #### -POSTHOOK: query: select compute_stats(fl_date, 'hll') from tab_date -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab_date -#### A masked pattern was here #### -{"columntype":"Date","min":"2000-11-20","max":"2010-10-29","countnulls":0,"numdistinctvalues":19,"ndvbitvector":HLL������������t������������������������R��ո��������������Y������������������������������������������������犞} -PREHOOK: query: explain -analyze table tab_date compute statistics for columns fl_date -PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@tab_date -PREHOOK: Output: default@tab_date -#### A masked pattern was here #### -POSTHOOK: query: explain -analyze table tab_date compute statistics for columns fl_date -POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@tab_date -POSTHOOK: Output: default@tab_date -#### A masked pattern was here #### -STAGE DEPENDENCIES: - Stage-0 is a root stage - Stage-2 depends on stages: Stage-0 - -STAGE PLANS: - Stage: Stage-0 - Tez -#### A masked pattern was here #### - Edges: - Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) -#### A masked pattern was here #### - Vertices: - Map 1 - Map Operator Tree: - TableScan - alias: tab_date - Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: fl_date (type: date) - outputColumnNames: fl_date - Statistics: Num rows: 1 Data size: 56 Basic stats: COMPLETE Column stats: NONE - Group By Operator - aggregations: min(fl_date), max(fl_date), count(1), count(fl_date), compute_bit_vector_hll(fl_date) - minReductionHashAggr: 0.99 - mode: hash - outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: date), _col1 (type: date), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary) - Execution mode: llap - LLAP IO: all inputs - Reducer 2 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector_hll(VALUE._col4) - mode: mergepartial - outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE - Select Operator - expressions: 'DATE' (type: string), _col0 (type: date), _col1 (type: date), (_col2 - _col3) (type: bigint), COALESCE(ndv_compute_bit_vector(_col4),0) (type: bigint), _col4 (type: binary) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 - Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 328 Basic stats: COMPLETE Column stats: NONE - table: - input format: org.apache.hadoop.mapred.SequenceFileInputFormat - output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat - serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - - Stage: Stage-2 - Stats 
Work - Basic Stats Work: - Column Stats Desc: - Columns: fl_date - Column Types: date - Table: default.tab_date - -PREHOOK: query: analyze table tab_date compute statistics for columns fl_date -PREHOOK: type: ANALYZE_TABLE -PREHOOK: Input: default@tab_date -PREHOOK: Output: default@tab_date -#### A masked pattern was here #### -POSTHOOK: query: analyze table tab_date compute statistics for columns fl_date -POSTHOOK: type: ANALYZE_TABLE -POSTHOOK: Input: default@tab_date -POSTHOOK: Output: default@tab_date -#### A masked pattern was here #### -PREHOOK: query: describe formatted tab_date fl_date -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tab_date -POSTHOOK: query: describe formatted tab_date fl_date -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tab_date -col_name fl_date -data_type date -min 2000-11-20 -max 2010-10-29 -num_nulls 0 -distinct_count 19 -avg_col_len -max_col_len -num_trues -num_falses -bit_vector HL -comment from deserializer -COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"fl_date\":\"true\"}} -PREHOOK: query: alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0') -PREHOOK: type: ALTERTABLE_UPDATETABLESTATS -PREHOOK: Input: default@tab_date -PREHOOK: Output: default@tab_date -POSTHOOK: query: alter table tab_date update statistics for column fl_date set ('numDVs'='19', 'highValue'='2015-01-01', 'lowValue'='0') -POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS -POSTHOOK: Input: default@tab_date -POSTHOOK: Output: default@tab_date -PREHOOK: query: describe formatted tab_date fl_date -PREHOOK: type: DESCTABLE -PREHOOK: Input: default@tab_date -POSTHOOK: query: describe formatted tab_date fl_date -POSTHOOK: type: DESCTABLE -POSTHOOK: Input: default@tab_date -col_name fl_date -data_type date -min 1970-01-01 -max 2015-01-01 -num_nulls 0 -distinct_count 19 -avg_col_len -max_col_len -num_trues -num_falses -bit_vector HL -comment from deserializer -COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"fl_date\":\"true\"}} diff --git a/ql/src/test/results/clientpositive/llap/compute_stats_decimal.q.out b/ql/src/test/results/clientpositive/llap/compute_stats_decimal.q.out deleted file mode 100644 index 810a8b232af2..000000000000 Binary files a/ql/src/test/results/clientpositive/llap/compute_stats_decimal.q.out and /dev/null differ diff --git a/ql/src/test/results/clientpositive/llap/compute_stats_double.q.out b/ql/src/test/results/clientpositive/llap/compute_stats_double.q.out deleted file mode 100644 index c6a902022495..000000000000 Binary files a/ql/src/test/results/clientpositive/llap/compute_stats_double.q.out and /dev/null differ diff --git a/ql/src/test/results/clientpositive/llap/compute_stats_empty_table.q.out b/ql/src/test/results/clientpositive/llap/compute_stats_empty_table.q.out deleted file mode 100644 index 2ce83e14bf1a..000000000000 --- a/ql/src/test/results/clientpositive/llap/compute_stats_empty_table.q.out +++ /dev/null @@ -1,62 +0,0 @@ -PREHOOK: query: create table tab_empty(a boolean, b int, c double, d string, e binary) -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@tab_empty -POSTHOOK: query: create table tab_empty(a boolean, b int, c double, d string, e binary) -POSTHOOK: type: CREATETABLE -POSTHOOK: Output: database:default -POSTHOOK: Output: default@tab_empty -PREHOOK: query: select count(*) from tab_empty -PREHOOK: type: QUERY -PREHOOK: Input: default@tab_empty -#### A masked pattern was here #### -POSTHOOK: query: 
select count(*) from tab_empty -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab_empty -#### A masked pattern was here #### -0 -PREHOOK: query: select compute_stats(a, 16) from tab_empty -PREHOOK: type: QUERY -PREHOOK: Input: default@tab_empty -#### A masked pattern was here #### -POSTHOOK: query: select compute_stats(a, 16) from tab_empty -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab_empty -#### A masked pattern was here #### -{"columntype":"Boolean","counttrues":0,"countfalses":0,"countnulls":0} -PREHOOK: query: select compute_stats(b, 16) from tab_empty -PREHOOK: type: QUERY -PREHOOK: Input: default@tab_empty -#### A masked pattern was here #### -POSTHOOK: query: select compute_stats(b, 16) from tab_empty -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab_empty -#### A masked pattern was here #### -{"columntype":"Long","min":null,"max":null,"countnulls":0,"numdistinctvalues":0,"ndvbitvector":} -PREHOOK: query: select compute_stats(c, 16) from tab_empty -PREHOOK: type: QUERY -PREHOOK: Input: default@tab_empty -#### A masked pattern was here #### -POSTHOOK: query: select compute_stats(c, 16) from tab_empty -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab_empty -#### A masked pattern was here #### -{"columntype":"Double","min":null,"max":null,"countnulls":0,"numdistinctvalues":0,"ndvbitvector":} -PREHOOK: query: select compute_stats(d, 16) from tab_empty -PREHOOK: type: QUERY -PREHOOK: Input: default@tab_empty -#### A masked pattern was here #### -POSTHOOK: query: select compute_stats(d, 16) from tab_empty -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab_empty -#### A masked pattern was here #### -{"columntype":"String","maxlength":0,"avglength":0.0,"countnulls":0,"numdistinctvalues":0,"ndvbitvector":} -PREHOOK: query: select compute_stats(e, 16) from tab_empty -PREHOOK: type: QUERY -PREHOOK: Input: default@tab_empty -#### A masked pattern was here #### -POSTHOOK: query: select compute_stats(e, 16) from tab_empty -POSTHOOK: type: QUERY -POSTHOOK: Input: default@tab_empty -#### A masked pattern was here #### -{"columntype":"Binary","maxlength":0,"avglength":0.0,"countnulls":0} diff --git a/ql/src/test/results/clientpositive/llap/compute_stats_long.q.out b/ql/src/test/results/clientpositive/llap/compute_stats_long.q.out deleted file mode 100644 index 138ee3f10478..000000000000 Binary files a/ql/src/test/results/clientpositive/llap/compute_stats_long.q.out and /dev/null differ diff --git a/ql/src/test/results/clientpositive/llap/compute_stats_string.q.out b/ql/src/test/results/clientpositive/llap/compute_stats_string.q.out deleted file mode 100644 index 135ac45e7541..000000000000 Binary files a/ql/src/test/results/clientpositive/llap/compute_stats_string.q.out and /dev/null differ diff --git a/ql/src/test/results/clientpositive/llap/constant_prop_coalesce.q.out b/ql/src/test/results/clientpositive/llap/constant_prop_coalesce.q.out index cb0cd4de2c8d..43bb312ec5e2 100644 --- a/ql/src/test/results/clientpositive/llap/constant_prop_coalesce.q.out +++ b/ql/src/test/results/clientpositive/llap/constant_prop_coalesce.q.out @@ -50,12 +50,10 @@ STAGE PLANS: outputColumnNames: _col0, _col1, _col2 UDTF Operator function name: stack - Filter Operator - predicate: if((col0 = 1), true, true) (type: boolean) - Select Operator - expressions: if((col0 = 1), 20210308L, 20210309L) (type: bigint) - outputColumnNames: _col0 - ListSink + Select Operator + expressions: if((col0 = 1), 20210308L, 20210309L) (type: bigint) + outputColumnNames: _col0 + ListSink PREHOOK: query: select * from ( 
select diff --git a/ql/src/test/results/clientpositive/llap/cte_3.q.out b/ql/src/test/results/clientpositive/llap/cte_3.q.out index 16b398caf2b6..ee1fe65effff 100644 --- a/ql/src/test/results/clientpositive/llap/cte_3.q.out +++ b/ql/src/test/results/clientpositive/llap/cte_3.q.out @@ -34,8 +34,8 @@ Stage-3 limit:-1 Select Operator [SEL_9] Output:["_col0"] - TableScan [TS_8] - Output:["key"] + TableScan [TS_8] (rows=2 width=85) + default@q1,q1,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] PREHOOK: query: with q1 as ( select key from src where key = '5') select * @@ -92,8 +92,8 @@ Stage-3 limit:-1 Select Operator [SEL_9] Output:["_col0"] - TableScan [TS_8] - Output:["key"] + TableScan [TS_8] (rows=2 width=85) + default@q1,q1,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] PREHOOK: query: with q1 as ( select key from src where key = '5') select * from (select key from q1) a @@ -136,12 +136,12 @@ Stage-5 Map 2 vectorized, llap File Output Operator [FS_15] table:{"name:":"default.q1"} - Select Operator [SEL_14] (rows=1 width=184) + Select Operator [SEL_14] (rows=2 width=85) Output:["_col0"] - Filter Operator [FIL_13] (rows=1 width=184) + Filter Operator [FIL_13] (rows=2 width=85) predicate:(key = '5') - TableScan [TS_8] (rows=1 width=184) - default@q2,q2,Tbl:COMPLETE,Col:NONE,Output:["key"] + TableScan [TS_8] (rows=2 width=85) + default@q2,q2,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] Stage-2 Dependency Collection{} Stage-1 @@ -165,8 +165,8 @@ Stage-6 limit:-1 Select Operator [SEL_17] Output:["_col0"] - TableScan [TS_16] - Output:["key"] + TableScan [TS_16] (rows=2 width=85) + default@q1,q1,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] PREHOOK: query: with q1 as ( select key from q2 where key = '5'), q2 as ( select key from src where key = '5') diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_10.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_10.q.out index 8262ba7c035a..45281a78563c 100644 --- a/ql/src/test/results/clientpositive/llap/cte_mat_10.q.out +++ b/ql/src/test/results/clientpositive/llap/cte_mat_10.q.out @@ -98,14 +98,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: a2 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: concat('a3 <- ', id) (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -133,14 +133,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b1 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 86 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: concat('b2 <- ', id) (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 1005 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 913 
Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -151,14 +151,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: a2 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: id (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 92 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 1005 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 913 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -177,7 +177,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 85 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 1005 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 913 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -188,14 +188,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: a3 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: id (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 1005 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 913 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -206,14 +206,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: a3 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: id (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 1005 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 913 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -224,14 +224,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b1 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 86 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: concat('b2 <- ', id) (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: 
COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 6 Data size: 1005 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 913 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_11.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_11.q.out new file mode 100644 index 000000000000..e48d6e834ed9 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/cte_mat_11.q.out @@ -0,0 +1,480 @@ +PREHOOK: query: EXPLAIN WITH materialized_cte1 AS ( + SELECT * FROM src +), +materialized_cte2 AS ( + SELECT a.key + FROM materialized_cte1 a + JOIN materialized_cte1 b ON (a.key = b.key) +) +SELECT a.key +FROM materialized_cte2 a +JOIN materialized_cte2 b ON (a.key = b.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@materialized_cte2 +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN WITH materialized_cte1 AS ( + SELECT * FROM src +), +materialized_cte2 AS ( + SELECT a.key + FROM materialized_cte1 a + JOIN materialized_cte1 b ON (a.key = b.key) +) +SELECT a.key +FROM materialized_cte2 a +JOIN materialized_cte2 b ON (a.key = b.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@materialized_cte2 +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-4 depends on stages: Stage-2, Stage-0 + Stage-5 depends on stages: Stage-4 + Stage-7 depends on stages: Stage-5, Stage-3 + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-1 + Stage-6 depends on stages: Stage-7 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.materialized_cte1 + Execution mode: vectorized, llap + LLAP IO: all inputs + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-4 + Tez +#### A masked pattern was here #### + Edges: + Reducer 3 <- Map 2 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 2 + Map Operator Tree: + TableScan + alias: a + filterExpr: key is not null (type: boolean) + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string) + null sort order: z + sort order: + + Map-reduce partition columns: 
_col0 (type: string) + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized, llap + LLAP IO: all inputs + Map 4 + Map Operator Tree: + TableScan + alias: b + filterExpr: key is not null (type: boolean) + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 500 Data size: 43500 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized, llap + LLAP IO: all inputs + Reducer 3 + Execution mode: llap + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col0 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.materialized_cte2 + + Stage: Stage-5 + Dependency Collection + + Stage: Stage-7 + Tez +#### A masked pattern was here #### + Edges: + Reducer 6 <- Map 5 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 5 + Map Operator Tree: + TableScan + alias: a + filterExpr: key is not null (type: boolean) + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized, llap + LLAP IO: all inputs + Map 7 + Map Operator Tree: + TableScan + alias: b + filterExpr: key is not null (type: boolean) + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: string) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 791 Data size: 68817 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized, llap + LLAP IO: all inputs + Reducer 6 + Execution mode: llap + Reduce Operator Tree: + Merge 
Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col0 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1980 Data size: 172260 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1980 Data size: 172260 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-3 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-6 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN CBO WITH materialized_cte1 AS ( + SELECT * FROM src +), +materialized_cte2 AS ( + SELECT a.key + FROM materialized_cte1 a + JOIN materialized_cte1 b ON (a.key = b.key) +) +SELECT a.key +FROM materialized_cte2 a +JOIN materialized_cte2 b ON (a.key = b.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@materialized_cte2 +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN CBO WITH materialized_cte1 AS ( + SELECT * FROM src +), +materialized_cte2 AS ( + SELECT a.key + FROM materialized_cte1 a + JOIN materialized_cte1 b ON (a.key = b.key) +) +SELECT a.key +FROM materialized_cte2 a +JOIN materialized_cte2 b ON (a.key = b.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@materialized_cte2 +#### A masked pattern was here #### +CBO PLAN: +HiveProject(key=[$0]) + HiveJoin(condition=[=($0, $1)], joinType=[inner], algorithm=[none], cost=[not available]) + HiveProject(key=[$0]) + HiveFilter(condition=[IS NOT NULL($0)]) + HiveTableScan(table=[[default, materialized_cte2]], table:alias=[a]) + HiveProject(key=[$0]) + HiveFilter(condition=[IS NOT NULL($0)]) + HiveTableScan(table=[[default, materialized_cte2]], table:alias=[b]) + +PREHOOK: query: EXPLAIN WITH materialized_cte1 AS ( + SELECT * FROM src +), +materialized_cte2 AS ( + SELECT * FROM materialized_cte1 + UNION ALL + SELECT * FROM materialized_cte1 +) +SELECT * FROM materialized_cte2 +UNION ALL +SELECT * FROM materialized_cte2 +PREHOOK: type: QUERY +PREHOOK: Input: default@materialized_cte2 +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN WITH materialized_cte1 AS ( + SELECT * FROM src +), +materialized_cte2 AS ( + SELECT * FROM materialized_cte1 + UNION ALL + SELECT * FROM materialized_cte1 +) +SELECT * FROM materialized_cte2 +UNION ALL +SELECT * FROM materialized_cte2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@materialized_cte2 +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-4 depends on stages: Stage-2, Stage-0 + Stage-5 depends on stages: Stage-4 + Stage-7 depends on stages: Stage-5, Stage-3 + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-1 + Stage-6 depends on stages: Stage-7 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: 
false + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.materialized_cte1 + Execution mode: vectorized, llap + LLAP IO: all inputs + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-4 + Tez +#### A masked pattern was here #### + Edges: + Map 2 <- Union 3 (CONTAINS) + Map 4 <- Union 3 (CONTAINS) +#### A masked pattern was here #### + Vertices: + Map 2 + Map Operator Tree: + TableScan + alias: materialized_cte1 + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.materialized_cte2 + Execution mode: vectorized, llap + LLAP IO: all inputs + Map 4 + Map Operator Tree: + TableScan + alias: materialized_cte1 + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 500 Data size: 89000 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.materialized_cte2 + Execution mode: vectorized, llap + LLAP IO: all inputs + Union 3 + Vertex: Union 3 + + Stage: Stage-5 + Dependency Collection + + Stage: Stage-7 + Tez +#### A masked pattern was here #### + Edges: + Map 5 <- Union 6 (CONTAINS) + Map 7 <- Union 6 (CONTAINS) +#### A masked pattern was here #### + Vertices: + Map 5 + Map Operator Tree: + TableScan + alias: materialized_cte2 + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized, llap + LLAP IO: all inputs + Map 7 + Map Operator Tree: + TableScan + alias: materialized_cte2 + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1000 Data size: 178000 Basic stats: COMPLETE Column stats: COMPLETE + 
File Output Operator + compressed: false + Statistics: Num rows: 2000 Data size: 356000 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized, llap + LLAP IO: all inputs + Union 6 + Vertex: Union 6 + + Stage: Stage-3 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-6 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: EXPLAIN CBO WITH materialized_cte1 AS ( + SELECT * FROM src +), +materialized_cte2 AS ( + SELECT * FROM materialized_cte1 + UNION ALL + SELECT * FROM materialized_cte1 +) +SELECT * FROM materialized_cte2 +UNION ALL +SELECT * FROM materialized_cte2 +PREHOOK: type: QUERY +PREHOOK: Input: default@materialized_cte2 +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN CBO WITH materialized_cte1 AS ( + SELECT * FROM src +), +materialized_cte2 AS ( + SELECT * FROM materialized_cte1 + UNION ALL + SELECT * FROM materialized_cte1 +) +SELECT * FROM materialized_cte2 +UNION ALL +SELECT * FROM materialized_cte2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@materialized_cte2 +#### A masked pattern was here #### +CBO PLAN: +HiveUnion(all=[true]) + HiveProject(key=[$0], value=[$1]) + HiveTableScan(table=[[default, materialized_cte2]], table:alias=[materialized_cte2]) + HiveProject(key=[$0], value=[$1]) + HiveTableScan(table=[[default, materialized_cte2]], table:alias=[materialized_cte2]) + diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_3.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_3.q.out index 53cf4a16b745..d63d003dc266 100644 --- a/ql/src/test/results/clientpositive/llap/cte_mat_3.q.out +++ b/ql/src/test/results/clientpositive/llap/cte_mat_3.q.out @@ -25,26 +25,26 @@ Stage-3 Stage-4 Reducer 3 llap File Output Operator [FS_18] - Merge Join Operator [MERGEJOIN_33] (rows=1 width=202) + Merge Join Operator [MERGEJOIN_33] (rows=4 width=85) Conds:RS_36._col0=RS_39._col0(Inner),Output:["_col0"] <-Map 2 [SIMPLE_EDGE] vectorized, llap SHUFFLE [RS_36] PartitionCols:_col0 - Select Operator [SEL_35] (rows=1 width=184) + Select Operator [SEL_35] (rows=2 width=85) Output:["_col0"] - Filter Operator [FIL_34] (rows=1 width=184) + Filter Operator [FIL_34] (rows=2 width=176) predicate:key is not null - TableScan [TS_8] (rows=1 width=184) - default@q1,a,Tbl:COMPLETE,Col:NONE,Output:["key"] + TableScan [TS_8] (rows=2 width=176) + default@q1,a,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] <-Map 4 [SIMPLE_EDGE] vectorized, llap SHUFFLE [RS_39] PartitionCols:_col0 - Select Operator [SEL_38] (rows=1 width=184) + Select Operator [SEL_38] (rows=2 width=85) Output:["_col0"] - Filter Operator [FIL_37] (rows=1 width=184) + Filter Operator [FIL_37] (rows=2 width=176) predicate:key is not null - TableScan [TS_11] (rows=1 width=184) - default@q1,b,Tbl:COMPLETE,Col:NONE,Output:["key"] + TableScan [TS_11] (rows=2 width=176) + default@q1,b,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] Stage-2 Dependency Collection{} Stage-1 diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_4.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_4.q.out index 79cc750108cf..4a1d6e0b06a9 100644 --- a/ql/src/test/results/clientpositive/llap/cte_mat_4.q.out +++ 
b/ql/src/test/results/clientpositive/llap/cte_mat_4.q.out @@ -50,26 +50,26 @@ Stage-3 Stage-4 Reducer 3 llap File Output Operator [FS_18] - Merge Join Operator [MERGEJOIN_33] (rows=1 width=202) + Merge Join Operator [MERGEJOIN_33] (rows=4 width=85) Conds:RS_36._col0=RS_39._col0(Inner),Output:["_col0"] <-Map 2 [SIMPLE_EDGE] vectorized, llap SHUFFLE [RS_36] PartitionCols:_col0 - Select Operator [SEL_35] (rows=1 width=184) + Select Operator [SEL_35] (rows=2 width=85) Output:["_col0"] - Filter Operator [FIL_34] (rows=1 width=184) + Filter Operator [FIL_34] (rows=2 width=176) predicate:key is not null - TableScan [TS_8] (rows=1 width=184) - default@q1,a,Tbl:COMPLETE,Col:NONE,Output:["key"] + TableScan [TS_8] (rows=2 width=176) + default@q1,a,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] <-Map 4 [SIMPLE_EDGE] vectorized, llap SHUFFLE [RS_39] PartitionCols:_col0 - Select Operator [SEL_38] (rows=1 width=184) + Select Operator [SEL_38] (rows=2 width=85) Output:["_col0"] - Filter Operator [FIL_37] (rows=1 width=184) + Filter Operator [FIL_37] (rows=2 width=176) predicate:key is not null - TableScan [TS_11] (rows=1 width=184) - default@q1,b,Tbl:COMPLETE,Col:NONE,Output:["key"] + TableScan [TS_11] (rows=2 width=176) + default@q1,b,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] Stage-2 Dependency Collection{} Stage-1 @@ -174,26 +174,26 @@ Stage-3 Stage-4 Reducer 3 llap File Output Operator [FS_18] - Merge Join Operator [MERGEJOIN_33] (rows=1 width=202) + Merge Join Operator [MERGEJOIN_33] (rows=4 width=85) Conds:RS_36._col0=RS_39._col0(Inner),Output:["_col0"] <-Map 2 [SIMPLE_EDGE] vectorized, llap SHUFFLE [RS_36] PartitionCols:_col0 - Select Operator [SEL_35] (rows=1 width=184) + Select Operator [SEL_35] (rows=2 width=85) Output:["_col0"] - Filter Operator [FIL_34] (rows=1 width=184) + Filter Operator [FIL_34] (rows=2 width=176) predicate:key is not null - TableScan [TS_8] (rows=1 width=184) - default@q1,a,Tbl:COMPLETE,Col:NONE,Output:["key"] + TableScan [TS_8] (rows=2 width=176) + default@q1,a,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] <-Map 4 [SIMPLE_EDGE] vectorized, llap SHUFFLE [RS_39] PartitionCols:_col0 - Select Operator [SEL_38] (rows=1 width=184) + Select Operator [SEL_38] (rows=2 width=85) Output:["_col0"] - Filter Operator [FIL_37] (rows=1 width=184) + Filter Operator [FIL_37] (rows=2 width=176) predicate:key is not null - TableScan [TS_11] (rows=1 width=184) - default@q1,b,Tbl:COMPLETE,Col:NONE,Output:["key"] + TableScan [TS_11] (rows=2 width=176) + default@q1,b,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] Stage-2 Dependency Collection{} Stage-1 diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_5.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_5.q.out index f5ef44070170..c5f5810ec36e 100644 --- a/ql/src/test/results/clientpositive/llap/cte_mat_5.q.out +++ b/ql/src/test/results/clientpositive/llap/cte_mat_5.q.out @@ -70,7 +70,7 @@ Stage-3 Stage-4 Reducer 3 llap File Output Operator [FS_18] - Merge Join Operator [MERGEJOIN_33] (rows=1 width=13) + Merge Join Operator [MERGEJOIN_33] (rows=2 width=4) Conds:RS_36._col1=RS_39._col0(Inner),Output:["_col0"] <-Map 2 [SIMPLE_EDGE] vectorized, llap SHUFFLE [RS_36] @@ -84,12 +84,12 @@ Stage-3 <-Map 4 [SIMPLE_EDGE] vectorized, llap SHUFFLE [RS_39] PartitionCols:_col0 - Select Operator [SEL_38] (rows=1 width=184) + Select Operator [SEL_38] (rows=2 width=8) Output:["_col0"] - Filter Operator [FIL_37] (rows=1 width=184) + Filter Operator [FIL_37] (rows=2 width=176) predicate:UDFToDouble(key) is not null - TableScan [TS_11] (rows=1 width=184) - 
default@q1,b,Tbl:COMPLETE,Col:NONE,Output:["key"] + TableScan [TS_11] (rows=2 width=176) + default@q1,b,Tbl:COMPLETE,Col:COMPLETE,Output:["key"] Stage-2 Dependency Collection{} Stage-1 diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_6.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_6.q.out index 42c2a3cfb691..ab3bd99ee299 100644 --- a/ql/src/test/results/clientpositive/llap/cte_mat_6.q.out +++ b/ql/src/test/results/clientpositive/llap/cte_mat_6.q.out @@ -21,8 +21,8 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table POSTHOOK: Output: default@t0 POSTHOOK: Lineage: t0.col0 SCRIPT [] -Warning: Shuffle Join MERGEJOIN[37][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 5' is a cross product -Warning: Shuffle Join MERGEJOIN[38][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Reducer 6' is a cross product +Warning: Shuffle Join MERGEJOIN[37][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 4' is a cross product +Warning: Shuffle Join MERGEJOIN[38][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Reducer 5' is a cross product PREHOOK: query: explain with cte as (select count(*) as small_count from t0 where col0 < 10) select t0.col0, (select small_count from cte) @@ -104,39 +104,39 @@ STAGE PLANS: Tez #### A masked pattern was here #### Edges: - Reducer 4 <- Map 3 (CUSTOM_SIMPLE_EDGE) - Reducer 5 <- Map 3 (CUSTOM_SIMPLE_EDGE), Reducer 4 (CUSTOM_SIMPLE_EDGE) - Reducer 6 <- Map 8 (XPROD_EDGE), Reducer 5 (XPROD_EDGE) - Reducer 7 <- Reducer 6 (SIMPLE_EDGE) + Reducer 4 <- Map 3 (CUSTOM_SIMPLE_EDGE), Reducer 7 (CUSTOM_SIMPLE_EDGE) + Reducer 5 <- Map 8 (XPROD_EDGE), Reducer 4 (XPROD_EDGE) + Reducer 6 <- Reducer 5 (SIMPLE_EDGE) + Reducer 7 <- Map 3 (CUSTOM_SIMPLE_EDGE) #### A masked pattern was here #### Vertices: Map 3 Map Operator Tree: TableScan alias: cte - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: small_count (type: bigint) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: bigint) Select Operator - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() - minReductionHashAggr: 0.99 + minReductionHashAggr: 0.4 mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) - Select Operator - expressions: small_count (type: bigint) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: bigint) Execution mode: vectorized, llap LLAP IO: all inputs Map 8 @@ -156,23 +156,6 @@ STAGE PLANS: Execution mode: vectorized, llap LLAP IO: 
all inputs Reducer 4 - Execution mode: vectorized, llap - Reduce Operator Tree: - Group By Operator - aggregations: count(VALUE._col0) - mode: mergepartial - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE - Filter Operator - predicate: sq_count_check(_col0) (type: boolean) - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE - Select Operator - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE - Reducer 5 Execution mode: llap Reduce Operator Tree: Merge Join Operator @@ -182,13 +165,13 @@ STAGE PLANS: 0 1 outputColumnNames: _col1 - Statistics: Num rows: 1 Data size: 17 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 17 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) - Reducer 6 + Reducer 5 Execution mode: llap Reduce Operator Tree: Merge Join Operator @@ -198,31 +181,48 @@ STAGE PLANS: 0 1 outputColumnNames: _col1, _col2 - Statistics: Num rows: 7 Data size: 154 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 7 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col2 (type: int), _col1 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 7 Data size: 154 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 7 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + - Statistics: Num rows: 7 Data size: 154 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 7 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) - Reducer 7 + Reducer 6 Execution mode: vectorized, llap Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 7 Data size: 154 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 7 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 7 Data size: 154 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 7 Data size: 84 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 7 + Execution mode: vectorized, llap + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + mode: mergepartial + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: sq_count_check(_col0) (type: boolean) + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Stage: Stage-0 Move 
Operator @@ -236,8 +236,8 @@ STAGE PLANS: Processor Tree: ListSink -Warning: Shuffle Join MERGEJOIN[37][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 5' is a cross product -Warning: Shuffle Join MERGEJOIN[38][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Reducer 6' is a cross product +Warning: Shuffle Join MERGEJOIN[37][tables = [$hdt$_1, $hdt$_2]] in Stage 'Reducer 4' is a cross product +Warning: Shuffle Join MERGEJOIN[38][tables = [$hdt$_1, $hdt$_2, $hdt$_0]] in Stage 'Reducer 5' is a cross product PREHOOK: query: with cte as (select count(*) as small_count from t0 where col0 < 10) select t0.col0, (select small_count from cte) from t0 @@ -496,31 +496,31 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cte - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() - minReductionHashAggr: 0.99 + minReductionHashAggr: 0.4 mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Filter Operator predicate: small_count is not null (type: boolean) - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: small_count (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Execution mode: vectorized, llap LLAP IO: all inputs @@ -551,16 +551,16 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: sq_count_check(_col0) (type: boolean) - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reducer 5 Execution mode: llap Reduce Operator Tree: @@ -571,11 +571,11 @@ STAGE PLANS: 0 1 outputColumnNames: _col1 - Statistics: Num rows: 1 Data size: 17 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 
Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 17 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) Reducer 6 Execution mode: llap @@ -588,26 +588,26 @@ STAGE PLANS: 1 outputColumnNames: _col1, _col2, _col3 residual filter predicates: {(_col3 > _col1)} - Statistics: Num rows: 2 Data size: 60 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 2 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col2 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 60 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + - Statistics: Num rows: 2 Data size: 60 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reducer 7 Execution mode: vectorized, llap Reduce Operator Tree: Select Operator expressions: KEY.reducesinkkey0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 2 Data size: 60 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 60 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -752,31 +752,31 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cte - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: small_count is not null (type: boolean) - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: small_count (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Select Operator - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() - minReductionHashAggr: 0.99 + minReductionHashAggr: 0.4 mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Execution mode: vectorized, llap LLAP IO: all inputs @@ 
-815,11 +815,11 @@ STAGE PLANS: 0 1 outputColumnNames: _col1 - Statistics: Num rows: 1 Data size: 17 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 17 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) Reducer 5 Execution mode: llap @@ -832,16 +832,16 @@ STAGE PLANS: 1 outputColumnNames: _col1, _col2, _col3 residual filter predicates: {(_col3 > _col1)} - Statistics: Num rows: 1 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col2 (type: int), _col3 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + - Statistics: Num rows: 1 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) Reducer 6 Execution mode: vectorized, llap @@ -849,10 +849,10 @@ STAGE PLANS: Select Operator expressions: KEY.reducesinkkey0 (type: int), VALUE._col0 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 30 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -864,16 +864,16 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: sq_count_check(_col0) (type: boolean) - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reducer 9 Execution mode: vectorized, llap Reduce Operator Tree: @@ -1035,31 +1035,31 @@ STAGE PLANS: Map Operator Tree: TableScan alias: cte2 - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 1 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: count() - minReductionHashAggr: 0.99 + 
minReductionHashAggr: 0.4 mode: hash outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Filter Operator predicate: all_count is not null (type: boolean) - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: all_count (type: bigint) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: bigint) Execution mode: vectorized, llap LLAP IO: all inputs @@ -1152,16 +1152,16 @@ STAGE PLANS: aggregations: count(VALUE._col0) mode: mergepartial outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: sq_count_check(_col0) (type: boolean) - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 8 Basic stats: PARTIAL Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reducer 5 Execution mode: llap Reduce Operator Tree: @@ -1172,11 +1172,11 @@ STAGE PLANS: 0 1 outputColumnNames: _col1 - Statistics: Num rows: 1 Data size: 17 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 17 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint) Reducer 6 Execution mode: llap @@ -1189,14 +1189,14 @@ STAGE PLANS: 1 outputColumnNames: _col1, _col2 residual filter predicates: {(_col2 > _col1)} - Statistics: Num rows: 1 Data size: 26 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: 5 (type: int), _col2 (type: bigint) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 26 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 26 Basic stats: PARTIAL Column stats: NONE + Statistics: Num rows: 1 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE table: input format: 
org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_8.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_8.q.out index cb086fdb9e5b..a241fa365546 100644 --- a/ql/src/test/results/clientpositive/llap/cte_mat_8.q.out +++ b/ql/src/test/results/clientpositive/llap/cte_mat_8.q.out @@ -75,14 +75,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: a1 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 86 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: concat('a2 <- ', id) (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -108,14 +108,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: a1 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 86 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: id (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 86 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 4 Data size: 637 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 4 Data size: 539 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -134,7 +134,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 85 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 4 Data size: 637 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 4 Data size: 539 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -145,14 +145,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: a2 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: id (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 4 Data size: 637 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 4 Data size: 539 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -163,14 +163,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: a2 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + 
Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: id (type: string) outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 4 Data size: 637 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 4 Data size: 539 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat diff --git a/ql/src/test/results/clientpositive/llap/cte_mat_9.q.out b/ql/src/test/results/clientpositive/llap/cte_mat_9.q.out index d9ac24a6f217..a5064535c42f 100644 --- a/ql/src/test/results/clientpositive/llap/cte_mat_9.q.out +++ b/ql/src/test/results/clientpositive/llap/cte_mat_9.q.out @@ -155,14 +155,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b1 - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: id (type: int), concat('b2 <- ', tag) (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -187,17 +187,17 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b1 - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: id (type: int), tag (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 96 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: string) Execution mode: vectorized, llap LLAP IO: all inputs @@ -223,17 +223,17 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b2 - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: id (type: int), tag (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE 
value expressions: _col1 (type: string) Execution mode: vectorized, llap LLAP IO: all inputs @@ -241,17 +241,17 @@ STAGE PLANS: Map Operator Tree: TableScan alias: b2 - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: id (type: int), concat('b3 <- ', tag) (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 1 Data size: 188 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: string) Execution mode: vectorized, llap LLAP IO: all inputs @@ -265,11 +265,11 @@ STAGE PLANS: 0 _col0 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string) Reducer 5 Execution mode: llap @@ -284,14 +284,14 @@ STAGE PLANS: 0 1 outputColumnNames: _col0, _col1, _col3, _col5 - Statistics: Num rows: 1 Data size: 413 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 1488 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col1 (type: string), _col3 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 1 Data size: 413 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 1488 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 1 Data size: 413 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 4 Data size: 1488 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -306,15 +306,15 @@ STAGE PLANS: 0 _col0 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col1, _col3 - Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 744 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), concat(concat(concat(concat('c <- (', _col1), ' & '), _col3), ')') (type: string) outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator null sort order: sort order: - Statistics: Num rows: 1 Data size: 206 Basic stats: COMPLETE Column stats: NONE + Statistics: Num rows: 2 Data size: 376 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col0 (type: int), _col1 (type: string) Stage: Stage-3 diff --git a/ql/src/test/results/clientpositive/llap/empty_result_ctas.q.out 
b/ql/src/test/results/clientpositive/llap/empty_result_ctas.q.out index 57cc1b7e8221..27eba3c3b628 100644 --- a/ql/src/test/results/clientpositive/llap/empty_result_ctas.q.out +++ b/ql/src/test/results/clientpositive/llap/empty_result_ctas.q.out @@ -18,8 +18,8 @@ POSTHOOK: Input: _dummy_database@_dummy_table POSTHOOK: Input: default@t1 POSTHOOK: Output: database:default POSTHOOK: Output: default@T2 -POSTHOOK: Lineage: t2.c_array EXPRESSION [] -POSTHOOK: Lineage: t2.c_nested EXPRESSION [] +POSTHOOK: Lineage: t2.c_array SIMPLE [] +POSTHOOK: Lineage: t2.c_nested SIMPLE [] POSTHOOK: Lineage: t2.c_primitive SIMPLE [] t1.c_primitive t1.c_array t1.c_nested PREHOOK: query: DESCRIBE FORMATTED t2 @@ -49,6 +49,126 @@ Table Parameters: totalSize 0 #### A masked pattern was here #### +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: CREATE TABLE T3 AS SELECT * FROM T1 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@t1 +PREHOOK: Output: database:default +PREHOOK: Output: default@T3 +POSTHOOK: query: CREATE TABLE T3 AS SELECT * FROM T1 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@t1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@T3 +POSTHOOK: Lineage: t3.c_array SIMPLE [(t1)t1.FieldSchema(name:c_array, type:array, comment:null), ] +POSTHOOK: Lineage: t3.c_nested SIMPLE [(t1)t1.FieldSchema(name:c_nested, type:array,f3:array>>, comment:null), ] +POSTHOOK: Lineage: t3.c_primitive SIMPLE [(t1)t1.FieldSchema(name:c_primitive, type:int, comment:null), ] +t1.c_primitive t1.c_array t1.c_nested +PREHOOK: query: DESCRIBE FORMATTED t3 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@t3 +POSTHOOK: query: DESCRIBE FORMATTED t3 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@t3 +col_name data_type comment +# col_name data_type comment +c_primitive int +c_array array +c_nested array,f3:array>> + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 0 + numRows 0 + rawDataSize 0 + totalSize 0 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe +InputFormat: org.apache.hadoop.mapred.TextInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: create table table1 (a string, b string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@table1 +POSTHOOK: query: create table table1 (a string, b string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@table1 +PREHOOK: query: create table table2 (complex_column array, `values`:array>>>>) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@table2 +POSTHOOK: query: create table table2 (complex_column array, `values`:array>>>>) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@table2 +PREHOOK: query: create table table3 as 
with t1 as (select * from table1), t2 as (select * from table2 where 1=0) select t1.*, t2.* from t1 left join t2 +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@table1 +PREHOOK: Input: default@table2 +PREHOOK: Output: database:default +PREHOOK: Output: default@table3 +POSTHOOK: query: create table table3 as with t1 as (select * from table1), t2 as (select * from table2 where 1=0) select t1.*, t2.* from t1 left join t2 +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@table1 +POSTHOOK: Input: default@table2 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@table3 +POSTHOOK: Lineage: table3.a SIMPLE [(table1)table1.FieldSchema(name:a, type:string, comment:null), ] +POSTHOOK: Lineage: table3.b SIMPLE [(table1)table1.FieldSchema(name:b, type:string, comment:null), ] +POSTHOOK: Lineage: table3.complex_column SIMPLE [] +t1.a t1.b t2.complex_column +PREHOOK: query: describe formatted table3 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@table3 +POSTHOOK: query: describe formatted table3 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@table3 +col_name data_type comment +# col_name data_type comment +a string +b string +complex_column array,values:array>>>> + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 + numFiles 0 + numRows 0 + rawDataSize 0 + totalSize 0 +#### A masked pattern was here #### + # Storage Information SerDe Library: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe InputFormat: org.apache.hadoop.mapred.TextInputFormat diff --git a/ql/src/test/results/clientpositive/llap/groupby_sort_2_23.q.out b/ql/src/test/results/clientpositive/llap/groupby_sort_2_23.q.out new file mode 100644 index 000000000000..780cb6ccd27b --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/groupby_sort_2_23.q.out @@ -0,0 +1,180 @@ +PREHOOK: query: create table test_bucket(age int, name string, dept string) clustered by (age, name) sorted by (age asc, name asc) into 2 buckets stored as ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@test_bucket +POSTHOOK: query: create table test_bucket(age int, name string, dept string) clustered by (age, name) sorted by (age asc, name asc) into 2 buckets stored as ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_bucket +PREHOOK: query: insert into test_bucket values (1, 'user1', 'dept1'), ( 2, 'user2' , 'dept2') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_bucket +POSTHOOK: query: insert into test_bucket values (1, 'user1', 'dept1'), ( 2, 'user2' , 'dept2') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_bucket +POSTHOOK: Lineage: test_bucket.age SCRIPT [] +POSTHOOK: Lineage: test_bucket.dept SCRIPT [] +POSTHOOK: Lineage: test_bucket.name SCRIPT [] +PREHOOK: query: insert into test_bucket values (1, 'user1', 'dept1'), ( 2, 'user2' , 'dept2') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@test_bucket +POSTHOOK: query: insert into test_bucket values (1, 'user1', 'dept1'), ( 2, 'user2' , 'dept2') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@test_bucket +POSTHOOK: Lineage: test_bucket.age SCRIPT [] +POSTHOOK: 
Lineage: test_bucket.dept SCRIPT [] +POSTHOOK: Lineage: test_bucket.name SCRIPT [] +PREHOOK: query: explain vectorization detail select age, name, count(*) from test_bucket group by age, name having count(*) > 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_bucket +#### A masked pattern was here #### +POSTHOOK: query: explain vectorization detail select age, name, count(*) from test_bucket group by age, name having count(*) > 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_bucket +#### A masked pattern was here #### +PLAN VECTORIZATION: + enabled: true + enabledConditionsMet: [hive.vectorized.execution.enabled IS true] + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: test_bucket + Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE + TableScan Vectorization: + native: true + vectorizationSchemaColumns: [0:age:int, 1:name:string, 2:dept:string, 3:ROW__ID:struct, 4:ROW__IS__DELETED:boolean] + Select Operator + expressions: age (type: int), name (type: string) + outputColumnNames: age, name + Select Vectorization: + className: VectorSelectOperator + native: true + projectedOutputColumnNums: [0, 1] + Statistics: Num rows: 4 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: count() + bucketGroup: true + Group By Vectorization: + aggregators: VectorUDAFCountStar(*) -> bigint + className: VectorGroupByOperator + groupByMode: HASH + keyExpressions: col 0:int, col 1:string + native: false + vectorProcessingMode: HASH + projectedOutputColumnNums: [0] + keys: age (type: int), name (type: string) + minReductionHashAggr: 0.5 + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 202 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: string) + null sort order: zz + sort order: ++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: string) + Reduce Sink Vectorization: + className: VectorReduceSinkMultiKeyOperator + keyColumns: 0:int, 1:string + native: true + nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true + valueColumns: 2:bigint + Statistics: Num rows: 2 Data size: 202 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col2 (type: bigint) + Execution mode: vectorized, llap + LLAP IO: all inputs + Map Vectorization: + enabled: true + enabledConditionsMet: hive.vectorized.use.vectorized.input.format IS true + inputFormatFeatureSupport: [DECIMAL_64] + featureSupportInUse: [DECIMAL_64] + inputFileFormats: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + allNative: false + usesVectorUDFAdaptor: false + vectorized: true + rowBatchContext: + dataColumnCount: 3 + includeColumns: [0, 1] + dataColumns: age:int, name:string, dept:string + partitionColumnCount: 0 + scratchColumnTypeNames: [] + Reducer 2 + Execution mode: vectorized, llap + Reduce Vectorization: + enabled: true + enableConditionsMet: hive.vectorized.execution.reduce.enabled IS true, hive.execution.engine tez IN [tez] IS true + reduceColumnNullOrder: zz + reduceColumnSortOrder: ++ 
+ allNative: false + usesVectorUDFAdaptor: false + vectorized: true + rowBatchContext: + dataColumnCount: 3 + dataColumns: KEY._col0:int, KEY._col1:string, VALUE._col0:bigint + partitionColumnCount: 0 + scratchColumnTypeNames: [] + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + Group By Vectorization: + aggregators: VectorUDAFCountMerge(col 2:bigint) -> bigint + className: VectorGroupByOperator + groupByMode: MERGEPARTIAL + keyExpressions: col 0:int, col 1:string + native: false + vectorProcessingMode: MERGE_PARTIAL + projectedOutputColumnNums: [0] + keys: KEY._col0 (type: int), KEY._col1 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 202 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + Filter Vectorization: + className: VectorFilterOperator + native: true + predicateExpression: FilterLongColGreaterLongScalar(col 2:bigint, val 1) + predicate: (_col2 > 1L) (type: boolean) + Statistics: Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + File Sink Vectorization: + className: VectorFileSinkOperator + native: false + Statistics: Num rows: 1 Data size: 101 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select age, name, count(*) from test_bucket group by age, name having count(*) > 1 +PREHOOK: type: QUERY +PREHOOK: Input: default@test_bucket +#### A masked pattern was here #### +POSTHOOK: query: select age, name, count(*) from test_bucket group by age, name having count(*) > 1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_bucket +#### A masked pattern was here #### +1 user1 2 +2 user2 2 diff --git a/ql/src/test/results/clientpositive/llap/in_coercion.q.out b/ql/src/test/results/clientpositive/llap/in_coercion.q.out index fe784ef6ec82..95fc24039751 100644 --- a/ql/src/test/results/clientpositive/llap/in_coercion.q.out +++ b/ql/src/test/results/clientpositive/llap/in_coercion.q.out @@ -31,10 +31,12 @@ POSTHOOK: Input: default@src_table NULL PREHOOK: query: select count(*) from src_table where key in (355.8) PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table PREHOOK: Input: default@src_table #### A masked pattern was here #### POSTHOOK: query: select count(*) from src_table where key in (355.8) POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table POSTHOOK: Input: default@src_table #### A masked pattern was here #### 0 diff --git a/ql/src/test/results/clientpositive/llap/insert_and_load_overwrite_drop_partition.q.out b/ql/src/test/results/clientpositive/llap/insert_and_load_overwrite_drop_partition.q.out new file mode 100644 index 000000000000..c16cb464568d --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/insert_and_load_overwrite_drop_partition.q.out @@ -0,0 +1,256 @@ +PREHOOK: query: CREATE EXTERNAL TABLE `table1`( + `name` string, + `number` string) +PARTITIONED BY ( + `part_col` string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@table1 +POSTHOOK: query: CREATE EXTERNAL TABLE `table1`( + `name` string, + `number` string) +PARTITIONED BY ( + `part_col` string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default 
+POSTHOOK: Output: default@table1 +PREHOOK: query: CREATE EXTERNAL TABLE `table2`( + `name` string, + `number` string) +PARTITIONED BY ( + `part_col` string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@table2 +POSTHOOK: query: CREATE EXTERNAL TABLE `table2`( + `name` string, + `number` string) +PARTITIONED BY ( + `part_col` string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@table2 +PREHOOK: query: insert into table table1 values ('a', '10', 'part1') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@table1 +POSTHOOK: query: insert into table table1 values ('a', '10', 'part1') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@table1 +POSTHOOK: Output: default@table1@part_col=part1 +POSTHOOK: Lineage: table1 PARTITION(part_col=part1).name SCRIPT [] +POSTHOOK: Lineage: table1 PARTITION(part_col=part1).number SCRIPT [] +PREHOOK: query: insert into table table1 values ('b', '11', 'part1') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@table1 +POSTHOOK: query: insert into table table1 values ('b', '11', 'part1') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@table1 +POSTHOOK: Output: default@table1@part_col=part1 +POSTHOOK: Lineage: table1 PARTITION(part_col=part1).name SCRIPT [] +POSTHOOK: Lineage: table1 PARTITION(part_col=part1).number SCRIPT [] +PREHOOK: query: insert into table table1 values ('a2', '2', 'part2') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@table1 +POSTHOOK: query: insert into table table1 values ('a2', '2', 'part2') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@table1 +POSTHOOK: Output: default@table1@part_col=part2 +POSTHOOK: Lineage: table1 PARTITION(part_col=part2).name SCRIPT [] +POSTHOOK: Lineage: table1 PARTITION(part_col=part2).number SCRIPT [] +PREHOOK: query: insert into table table2 values ('x', '100', 'part1') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@table2 +POSTHOOK: query: insert into table table2 values ('x', '100', 'part1') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@table2 +POSTHOOK: Output: default@table2@part_col=part1 +POSTHOOK: Lineage: table2 PARTITION(part_col=part1).name SCRIPT [] +POSTHOOK: Lineage: table2 PARTITION(part_col=part1).number SCRIPT [] +PREHOOK: query: insert into table table2 values ('y', '101', 'part1') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@table2 +POSTHOOK: query: insert into table table2 values ('y', '101', 'part1') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@table2 +POSTHOOK: Output: default@table2@part_col=part1 +POSTHOOK: Lineage: table2 PARTITION(part_col=part1).name SCRIPT [] +POSTHOOK: Lineage: table2 PARTITION(part_col=part1).number SCRIPT [] +PREHOOK: query: insert into table table2 values ('z', '102', 'part1') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@table2 +POSTHOOK: query: insert into table table2 values ('z', '102', 'part1') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@table2 +POSTHOOK: Output: default@table2@part_col=part1 +POSTHOOK: 
Lineage: table2 PARTITION(part_col=part1).name SCRIPT [] +POSTHOOK: Lineage: table2 PARTITION(part_col=part1).number SCRIPT [] +PREHOOK: query: insert into table table2 values ('x2', '200', 'part2') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@table2 +POSTHOOK: query: insert into table table2 values ('x2', '200', 'part2') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@table2 +POSTHOOK: Output: default@table2@part_col=part2 +POSTHOOK: Lineage: table2 PARTITION(part_col=part2).name SCRIPT [] +POSTHOOK: Lineage: table2 PARTITION(part_col=part2).number SCRIPT [] +PREHOOK: query: insert into table table2 values ('y2', '201', 'part2') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@table2 +POSTHOOK: query: insert into table table2 values ('y2', '201', 'part2') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@table2 +POSTHOOK: Output: default@table2@part_col=part2 +POSTHOOK: Lineage: table2 PARTITION(part_col=part2).name SCRIPT [] +POSTHOOK: Lineage: table2 PARTITION(part_col=part2).number SCRIPT [] +PREHOOK: query: insert into table table2 values ('x3', '300', 'part3') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@table2 +POSTHOOK: query: insert into table table2 values ('x3', '300', 'part3') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@table2 +POSTHOOK: Output: default@table2@part_col=part3 +POSTHOOK: Lineage: table2 PARTITION(part_col=part3).name SCRIPT [] +POSTHOOK: Lineage: table2 PARTITION(part_col=part3).number SCRIPT [] +PREHOOK: query: alter table table2 drop partition(part_col='part1') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@table2 +PREHOOK: Output: default@table2@part_col=part1 +POSTHOOK: query: alter table table2 drop partition(part_col='part1') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@table2 +POSTHOOK: Output: default@table2@part_col=part1 +PREHOOK: query: select count(*) from table2 where part_col='part1' +PREHOOK: type: QUERY +PREHOOK: Input: default@table2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from table2 where part_col='part1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@table2 +#### A masked pattern was here #### +0 +Found 3 items +#### A masked pattern was here #### +PREHOOK: query: insert overwrite table table2 partition(part_col='part1') select name, number from table1 where part_col='part1' +PREHOOK: type: QUERY +PREHOOK: Input: default@table1 +PREHOOK: Input: default@table1@part_col=part1 +PREHOOK: Output: default@table2@part_col=part1 +POSTHOOK: query: insert overwrite table table2 partition(part_col='part1') select name, number from table1 where part_col='part1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@table1 +POSTHOOK: Input: default@table1@part_col=part1 +POSTHOOK: Output: default@table2@part_col=part1 +POSTHOOK: Lineage: table2 PARTITION(part_col=part1).name SIMPLE [(table1)table1.FieldSchema(name:name, type:string, comment:null), ] +POSTHOOK: Lineage: table2 PARTITION(part_col=part1).number SIMPLE [(table1)table1.FieldSchema(name:number, type:string, comment:null), ] +PREHOOK: query: select count(*) from table2 where part_col='part1' +PREHOOK: type: QUERY +PREHOOK: Input: default@table2 +PREHOOK: Input: default@table2@part_col=part1 +#### A masked pattern was here #### +POSTHOOK: query: 
select count(*) from table2 where part_col='part1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@table2 +POSTHOOK: Input: default@table2@part_col=part1 +#### A masked pattern was here #### +2 +Found 1 items +#### A masked pattern was here #### +PREHOOK: query: alter table table2 drop partition(part_col='part2') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@table2 +PREHOOK: Output: default@table2@part_col=part2 +POSTHOOK: query: alter table table2 drop partition(part_col='part2') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@table2 +POSTHOOK: Output: default@table2@part_col=part2 +PREHOOK: query: select count(*) from table2 where part_col='part2' +PREHOOK: type: QUERY +PREHOOK: Input: default@table2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from table2 where part_col='part2' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@table2 +#### A masked pattern was here #### +0 +Found 2 items +#### A masked pattern was here #### +PREHOOK: query: insert overwrite table table2 partition(part_col='part2') select name, number from table1 where part_col='dummy_part' +PREHOOK: type: QUERY +PREHOOK: Input: default@table1 +PREHOOK: Output: default@table2@part_col=part2 +POSTHOOK: query: insert overwrite table table2 partition(part_col='part2') select name, number from table1 where part_col='dummy_part' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@table1 +POSTHOOK: Output: default@table2@part_col=part2 +POSTHOOK: Lineage: table2 PARTITION(part_col=part2).name SIMPLE [(table1)table1.FieldSchema(name:name, type:string, comment:null), ] +POSTHOOK: Lineage: table2 PARTITION(part_col=part2).number SIMPLE [(table1)table1.FieldSchema(name:number, type:string, comment:null), ] +PREHOOK: query: select count(*) from table2 where part_col='part2' +PREHOOK: type: QUERY +PREHOOK: Input: default@table2 +PREHOOK: Input: default@table2@part_col=part2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from table2 where part_col='part2' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@table2 +POSTHOOK: Input: default@table2@part_col=part2 +#### A masked pattern was here #### +0 +PREHOOK: query: alter table table2 drop partition(part_col='part3') +PREHOOK: type: ALTERTABLE_DROPPARTS +PREHOOK: Input: default@table2 +PREHOOK: Output: default@table2@part_col=part3 +POSTHOOK: query: alter table table2 drop partition(part_col='part3') +POSTHOOK: type: ALTERTABLE_DROPPARTS +POSTHOOK: Input: default@table2 +POSTHOOK: Output: default@table2@part_col=part3 +PREHOOK: query: select count(*) from table2 where part_col='part3' +PREHOOK: type: QUERY +PREHOOK: Input: default@table2 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from table2 where part_col='part3' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@table2 +#### A masked pattern was here #### +0 +Found 1 items +#### A masked pattern was here #### +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv5.txt' OVERWRITE INTO TABLE table2 PARTITION(part_col='part3') +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@table2 +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv5.txt' OVERWRITE INTO TABLE table2 PARTITION(part_col='part3') +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@table2 +POSTHOOK: Output: default@table2@part_col=part3 +PREHOOK: query: select count(*) from table2 where part_col='part3' +PREHOOK: type: QUERY +PREHOOK: Input: default@table2 +PREHOOK: Input: 
default@table2@part_col=part3 +#### A masked pattern was here #### +POSTHOOK: query: select count(*) from table2 where part_col='part3' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@table2 +POSTHOOK: Input: default@table2@part_col=part3 +#### A masked pattern was here #### +24 +Found 1 items +#### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/llap/like_control_characters.q.out b/ql/src/test/results/clientpositive/llap/like_control_characters.q.out new file mode 100644 index 000000000000..14aa86328db4 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/like_control_characters.q.out @@ -0,0 +1,93 @@ +PREHOOK: query: create temporary table foo (col string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@foo +POSTHOOK: query: create temporary table foo (col string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@foo +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/control_characters.txt' INTO TABLE foo +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@foo +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/control_characters.txt' INTO TABLE foo +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@foo +PREHOOK: query: explain select col, count(*) from foo where col like '%fg%' group by col +PREHOOK: type: QUERY +PREHOOK: Input: default@foo +#### A masked pattern was here #### +POSTHOOK: query: explain select col, count(*) from foo where col like '%fg%' group by col +POSTHOOK: type: QUERY +POSTHOOK: Input: default@foo +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: foo + filterExpr: (col like '%fg%') (type: boolean) + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (col like '%fg%') (type: boolean) + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count() + keys: col (type: string) + minReductionHashAggr: 0.99 + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Execution mode: vectorized, llap + LLAP IO: all inputs + Reducer 2 + Execution mode: vectorized, llap + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 184 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: 
query: select col, count(*) from foo where col like '%fg%' group by col +PREHOOK: type: QUERY +PREHOOK: Input: default@foo +#### A masked pattern was here #### +POSTHOOK: query: select col, count(*) from foo where col like '%fg%' group by col +POSTHOOK: type: QUERY +POSTHOOK: Input: default@foo +#### A masked pattern was here #### +abcde�fghi 1 diff --git a/ql/src/test/results/clientpositive/llap/mapjoin_date.q.out b/ql/src/test/results/clientpositive/llap/mapjoin_date.q.out new file mode 100644 index 000000000000..c5dfc75a5f30 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/mapjoin_date.q.out @@ -0,0 +1,186 @@ +PREHOOK: query: CREATE TABLE person (fname string, birthDate date) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@person +POSTHOOK: query: CREATE TABLE person (fname string, birthDate date) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@person +PREHOOK: query: INSERT INTO person VALUES ('Victor', '2023-11-27'), ('Alexandre', '2023-11-28') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@person +POSTHOOK: query: INSERT INTO person VALUES ('Victor', '2023-11-27'), ('Alexandre', '2023-11-28') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@person +POSTHOOK: Lineage: person.birthdate SCRIPT [] +POSTHOOK: Lineage: person.fname SCRIPT [] +PREHOOK: query: EXPLAIN VECTORIZATION DETAIL SELECT * FROM person p1 INNER JOIN person p2 ON p1.birthDate=p2.birthDate +PREHOOK: type: QUERY +PREHOOK: Input: default@person +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN VECTORIZATION DETAIL SELECT * FROM person p1 INNER JOIN person p2 ON p1.birthDate=p2.birthDate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@person +#### A masked pattern was here #### +PLAN VECTORIZATION: + enabled: true + enabledConditionsMet: [hive.vectorized.execution.enabled IS true] + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: p1 + filterExpr: birthdate is not null (type: boolean) + probeDecodeDetails: cacheKey:HASH_MAP_MAPJOIN_25_container, bigKeyColName:birthdate, smallTablePos:1, keyRatio:0.0 + Statistics: Num rows: 2 Data size: 296 Basic stats: COMPLETE Column stats: COMPLETE + TableScan Vectorization: + native: true + vectorizationSchemaColumns: [0:fname:string, 1:birthdate:date, 2:ROW__ID:struct, 3:ROW__IS__DELETED:boolean] + Filter Operator + Filter Vectorization: + className: VectorFilterOperator + native: true + predicateExpression: SelectColumnIsNotNull(col 1:date) + predicate: birthdate is not null (type: boolean) + Statistics: Num rows: 2 Data size: 296 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: fname (type: string), birthdate (type: date) + outputColumnNames: _col0, _col1 + Select Vectorization: + className: VectorSelectOperator + native: true + projectedOutputColumnNums: [0, 1] + Statistics: Num rows: 2 Data size: 296 Basic stats: COMPLETE Column stats: COMPLETE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col1 (type: date) + 1 _col1 (type: date) + Map Join Vectorization: + bigTableKeyColumns: 1:date + bigTableRetainColumnNums: [0, 1] + bigTableValueColumns: 0:string, 1:date + className: 
VectorMapJoinInnerLongOperator + native: true + nativeConditionsMet: hive.mapjoin.optimized.hashtable IS true, hive.vectorized.execution.mapjoin.native.enabled IS true, hive.execution.engine tez IN [tez] IS true, One MapJoin Condition IS true, No nullsafe IS true, Small table vectorizes IS true, Optimized Table and Supports Key Types IS true + nonOuterSmallTableKeyMapping: [] + projectedOutput: 0:string, 1:date, 4:string, 1:date + smallTableValueMapping: 4:string + hashTableImplementationType: OPTIMIZED + outputColumnNames: _col0, _col1, _col2, _col3 + input vertices: + 1 Map 2 + Statistics: Num rows: 4 Data size: 1184 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + File Sink Vectorization: + className: VectorFileSinkOperator + native: false + Statistics: Num rows: 4 Data size: 1184 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Execution mode: vectorized, llap + LLAP IO: all inputs + Map Vectorization: + enabled: true + enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true + inputFormatFeatureSupport: [DECIMAL_64] + featureSupportInUse: [DECIMAL_64] + inputFileFormats: org.apache.hadoop.mapred.TextInputFormat + allNative: false + usesVectorUDFAdaptor: false + vectorized: true + rowBatchContext: + dataColumnCount: 2 + includeColumns: [0, 1] + dataColumns: fname:string, birthdate:date + partitionColumnCount: 0 + scratchColumnTypeNames: [string] + Map 2 + Map Operator Tree: + TableScan + alias: p2 + filterExpr: birthdate is not null (type: boolean) + Statistics: Num rows: 2 Data size: 296 Basic stats: COMPLETE Column stats: COMPLETE + TableScan Vectorization: + native: true + vectorizationSchemaColumns: [0:fname:string, 1:birthdate:date, 2:ROW__ID:struct, 3:ROW__IS__DELETED:boolean] + Filter Operator + Filter Vectorization: + className: VectorFilterOperator + native: true + predicateExpression: SelectColumnIsNotNull(col 1:date) + predicate: birthdate is not null (type: boolean) + Statistics: Num rows: 2 Data size: 296 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: fname (type: string), birthdate (type: date) + outputColumnNames: _col0, _col1 + Select Vectorization: + className: VectorSelectOperator + native: true + projectedOutputColumnNums: [0, 1] + Statistics: Num rows: 2 Data size: 296 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col1 (type: date) + null sort order: z + sort order: + + Map-reduce partition columns: _col1 (type: date) + Reduce Sink Vectorization: + className: VectorReduceSinkLongOperator + keyColumns: 1:date + native: true + nativeConditionsMet: hive.vectorized.execution.reducesink.new.enabled IS true, hive.execution.engine tez IN [tez] IS true, No PTF TopN IS true, No DISTINCT columns IS true, BinarySortableSerDe for keys IS true, LazyBinarySerDe for values IS true + valueColumns: 0:string + Statistics: Num rows: 2 Data size: 296 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: string) + Execution mode: vectorized, llap + LLAP IO: all inputs + Map Vectorization: + enabled: true + enabledConditionsMet: hive.vectorized.use.vector.serde.deserialize IS true + inputFormatFeatureSupport: [DECIMAL_64] + featureSupportInUse: [DECIMAL_64] + inputFileFormats: org.apache.hadoop.mapred.TextInputFormat + 
allNative: true + usesVectorUDFAdaptor: false + vectorized: true + rowBatchContext: + dataColumnCount: 2 + includeColumns: [0, 1] + dataColumns: fname:string, birthdate:date + partitionColumnCount: 0 + scratchColumnTypeNames: [] + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT * FROM person p1 INNER JOIN person p2 ON p1.birthDate=p2.birthDate +PREHOOK: type: QUERY +PREHOOK: Input: default@person +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM person p1 INNER JOIN person p2 ON p1.birthDate=p2.birthDate +POSTHOOK: type: QUERY +POSTHOOK: Input: default@person +#### A masked pattern was here #### +Victor 2023-11-27 Victor 2023-11-27 +Alexandre 2023-11-28 Alexandre 2023-11-28 diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_12.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_12.q.out new file mode 100644 index 000000000000..81a33edecfa9 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_12.q.out @@ -0,0 +1,211 @@ +PREHOOK: query: create table t1 (a int, b int) stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 +POSTHOOK: query: create table t1 (a int, b int) stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t1 +PREHOOK: query: insert into t1 values +(3, 3), +(2, 1), +(2, 2), +(1, 2), +(1, 1) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@t1 +POSTHOOK: query: insert into t1 values +(3, 3), +(2, 1), +(2, 2), +(1, 2), +(1, 1) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@t1 +POSTHOOK: Lineage: t1.a SCRIPT [] +POSTHOOK: Lineage: t1.b SCRIPT [] +PREHOOK: query: CREATE MATERIALIZED VIEW mat1 + TBLPROPERTIES ('transactional'='true') AS +SELECT a +FROM t1 +WHERE b < 10 +PREHOOK: type: CREATE_MATERIALIZED_VIEW +PREHOOK: Input: default@t1 +PREHOOK: Output: database:default +PREHOOK: Output: default@mat1 +POSTHOOK: query: CREATE MATERIALIZED VIEW mat1 + TBLPROPERTIES ('transactional'='true') AS +SELECT a +FROM t1 +WHERE b < 10 +POSTHOOK: type: CREATE_MATERIALIZED_VIEW +POSTHOOK: Input: default@t1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mat1 +POSTHOOK: Lineage: mat1.a SIMPLE [(t1)t1.FieldSchema(name:a, type:int, comment:null), ] +PREHOOK: query: delete from t1 where b = 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Output: default@t1 +POSTHOOK: query: delete from t1 where b = 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Output: default@t1 +PREHOOK: query: explain +alter materialized view mat1 rebuild +PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +PREHOOK: Input: default@t1 +PREHOOK: Output: default@mat1 +POSTHOOK: query: explain +alter materialized view mat1 rebuild +POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +POSTHOOK: Input: default@t1 +POSTHOOK: Output: default@mat1 +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + 
alias: t1 + filterExpr: (b < 10) (type: boolean) + Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (b < 10) (type: boolean) + Statistics: Num rows: 3 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int) + outputColumnNames: a + Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: min(a), max(a), count(1), count(a), compute_bit_vector_hll(a) + minReductionHashAggr: 0.6666666 + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary) + Execution mode: vectorized, llap + LLAP IO: may be used (ACID table) + Reducer 2 + Execution mode: vectorized, llap + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector_hll(VALUE._col4) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 1 Data size: 168 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 'LONG' (type: string), UDFToLong(_col0) (type: bigint), UDFToLong(_col1) (type: bigint), (_col2 - _col3) (type: bigint), COALESCE(ndv_compute_bit_vector(_col4),0) (type: bigint), _col4 (type: binary) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 1 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 264 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-2 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: INSERT + + Stage: Stage-3 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: a + Column Types: int + Table: default.mat1 + + Stage: Stage-4 + Materialized View Update + name: default.mat1 + update creation metadata: true + +PREHOOK: query: alter materialized view mat1 rebuild +PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +PREHOOK: Input: default@t1 +PREHOOK: Output: default@mat1 +POSTHOOK: query: alter materialized view mat1 rebuild +POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +POSTHOOK: Input: default@t1 +POSTHOOK: 
Output: default@mat1 +POSTHOOK: Lineage: mat1.a SIMPLE [(t1)t1.FieldSchema(name:a, type:int, comment:null), ] +PREHOOK: query: explain cbo +SELECT a +FROM t1 +WHERE b < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@mat1 +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: explain cbo +SELECT a +FROM t1 +WHERE b < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mat1 +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +CBO PLAN: +HiveTableScan(table=[[default, mat1]], table:alias=[default.mat1]) + +PREHOOK: query: SELECT a +FROM t1 +WHERE b < 10 +PREHOOK: type: QUERY +PREHOOK: Input: default@mat1 +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: SELECT a +FROM t1 +WHERE b < 10 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mat1 +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +3 +2 +1 diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out index 482c113239b5..1b06d867b39c 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_5.q.out @@ -7,6 +7,8 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@cmv_basetable_n6 PREHOOK: query: insert into cmv_basetable_n6 values + (1, 'alfred', 10.30, 2), + (1, 'alfred', 10.30, 2), (1, 'alfred', 10.30, 2), (2, 'bob', 3.14, 3), (2, 'bonnie', 172342.2, 3), @@ -16,6 +18,8 @@ PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table PREHOOK: Output: default@cmv_basetable_n6 POSTHOOK: query: insert into cmv_basetable_n6 values + (1, 'alfred', 10.30, 2), + (1, 'alfred', 10.30, 2), (1, 'alfred', 10.30, 2), (2, 'bob', 3.14, 3), (2, 'bonnie', 172342.2, 3), @@ -37,12 +41,14 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@cmv_basetable_2_n3 PREHOOK: query: insert into cmv_basetable_2_n3 values + (1, 'alfred', 10.30, 2), (1, 'alfred', 10.30, 2), (3, 'calvin', 978.76, 3) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table PREHOOK: Output: default@cmv_basetable_2_n3 POSTHOOK: query: insert into cmv_basetable_2_n3 values + (1, 'alfred', 10.30, 2), (1, 'alfred', 10.30, 2), (3, 'calvin', 978.76, 3) POSTHOOK: type: QUERY @@ -121,20 +127,20 @@ STAGE PLANS: TableScan alias: cmv_basetable_n6 filterExpr: a is not null (type: boolean) - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: a is not null (type: boolean) - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: llap LLAP IO: may be used (ACID table) Map 3 @@ -142,20 
+148,20 @@ STAGE PLANS: TableScan alias: cmv_basetable_2_n3 filterExpr: ((c > 10.1) and a is not null) (type: boolean) - Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((c > 10.1) and a is not null) (type: boolean) - Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int) outputColumnNames: _col0 - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 3 Data size: 12 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: llap LLAP IO: may be used (ACID table) Reducer 2 @@ -168,10 +174,10 @@ STAGE PLANS: 0 _col0 (type: int) 1 _col0 (type: int) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 9 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 9 Data size: 36 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat @@ -198,6 +204,11 @@ POSTHOOK: Input: default@cmv_basetable_2_n3 POSTHOOK: Input: default@cmv_basetable_n6 #### A masked pattern was here #### 1 +1 +1 +1 +1 +1 3 3 3 @@ -235,20 +246,20 @@ STAGE PLANS: TableScan alias: cmv_basetable_n6 filterExpr: a is not null (type: boolean) - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: a is not null (type: boolean) - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: llap LLAP IO: may be used (ACID table) Map 4 @@ -256,7 +267,7 @@ STAGE PLANS: TableScan alias: cmv_basetable_2_n3 filterExpr: ((ROW__ID.writeid > 1L) and (c > 10) and a is not null) (type: boolean) - Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((ROW__ID.writeid > 1L) and (c > 10) and a is not null) (type: boolean) Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: 
COMPLETE @@ -391,9 +402,9 @@ Table Parameters: COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\",\"COLUMN_STATS\":{\"a\":\"true\",\"c\":\"true\"}} bucketing_version 2 numFiles 2 - numRows 5 + numRows 10 rawDataSize 0 - totalSize 1516 + totalSize 1522 transactional true transactional_properties default #### A masked pattern was here #### @@ -474,6 +485,11 @@ POSTHOOK: Input: default@cmv_basetable_n6 POSTHOOK: Input: default@cmv_mat_view_n6 #### A masked pattern was here #### 1 +1 +1 +1 +1 +1 3 3 3 @@ -497,101 +513,70 @@ ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD PREHOOK: Input: default@cmv_basetable_2_n3 PREHOOK: Input: default@cmv_basetable_n6 -PREHOOK: Input: default@cmv_mat_view_n6 -PREHOOK: Output: default@cmv_mat_view_n6 PREHOOK: Output: default@cmv_mat_view_n6 POSTHOOK: query: EXPLAIN ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD POSTHOOK: Input: default@cmv_basetable_2_n3 POSTHOOK: Input: default@cmv_basetable_n6 -POSTHOOK: Input: default@cmv_mat_view_n6 -POSTHOOK: Output: default@cmv_mat_view_n6 POSTHOOK: Output: default@cmv_mat_view_n6 STAGE DEPENDENCIES: - Stage-2 is a root stage - Stage-3 depends on stages: Stage-2 - Stage-0 depends on stages: Stage-3 - Stage-4 depends on stages: Stage-0 - Stage-6 depends on stages: Stage-4, Stage-5 - Stage-1 depends on stages: Stage-3 - Stage-5 depends on stages: Stage-1 + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 depends on stages: Stage-3 STAGE PLANS: - Stage: Stage-2 + Stage: Stage-1 Tez #### A masked pattern was here #### Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE) - Reducer 3 <- Reducer 2 (SIMPLE_EDGE) - Reducer 4 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) - Reducer 6 <- Map 5 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) #### A masked pattern was here #### Vertices: Map 1 - Map Operator Tree: - TableScan - alias: default.cmv_mat_view_n6 - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: a (type: int), ROW__ID (type: struct) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 5 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 5 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: struct) - Execution mode: llap - LLAP IO: may be used (ACID table) - Map 5 Map Operator Tree: TableScan alias: cmv_basetable_n6 filterExpr: a is not null (type: boolean) - properties: - acid.fetch.deleted.rows TRUE - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: a is not null (type: boolean) - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: a (type: int), ROW__IS__DELETED (type: boolean) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE + expressions: a (type: int) + outputColumnNames: _col0 + Statistics: Num 
rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: boolean) + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: llap LLAP IO: may be used (ACID table) - Map 7 + Map 4 Map Operator Tree: TableScan alias: cmv_basetable_2_n3 - filterExpr: ((ROW__ID.writeid > 2L) and (c > 10) and a is not null) (type: boolean) - properties: - acid.fetch.deleted.rows TRUE - Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE + filterExpr: ((c > 10) and a is not null) (type: boolean) + Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((ROW__ID.writeid > 2L) and (c > 10) and a is not null) (type: boolean) - Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE + predicate: ((c > 10) and a is not null) (type: boolean) + Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: a (type: int), c (type: decimal(10,2)), ROW__IS__DELETED (type: boolean) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + expressions: a (type: int), c (type: decimal(10,2)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: decimal(10,2)), _col2 (type: boolean) + Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: decimal(10,2)) Execution mode: llap LLAP IO: may be used (ACID table) Reducer 2 @@ -599,74 +584,41 @@ STAGE PLANS: Reduce Operator Tree: Merge Join Operator condition map: - Right Outer Join 0 to 1 + Inner Join 0 to 1 keys: 0 _col0 (type: int) 1 _col0 (type: int) - nullSafes: [true] - outputColumnNames: _col1, _col2, _col3, _col4 - Statistics: Num rows: 6 Data size: 1176 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: _col4 (type: boolean) - Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: struct) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: struct) - null sort order: z - sort order: + - Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (not _col4) (type: boolean) - Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE + outputColumnNames: _col0, _col2 + Statistics: Num rows: 9 Data size: 1044 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: int), _col2 (type: decimal(10,2)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 9 Data size: 1044 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + 
Statistics: Num rows: 9 Data size: 1044 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.cmv_mat_view_n6 + Write Type: INSERT Select Operator - expressions: _col2 (type: int), _col3 (type: decimal(10,2)) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.cmv_mat_view_n6 - Write Type: INSERT - Select Operator - expressions: _col0 (type: int), _col1 (type: decimal(10,2)) - outputColumnNames: a, c - Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: min(a), max(a), count(1), count(a), compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c) - minReductionHashAggr: 0.4 - mode: hash - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 + expressions: _col0 (type: int), _col1 (type: decimal(10,2)) + outputColumnNames: a, c + Statistics: Num rows: 9 Data size: 1044 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: min(a), max(a), count(1), count(a), compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c) + minReductionHashAggr: 0.8888889 + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 + Statistics: Num rows: 1 Data size: 544 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: Statistics: Num rows: 1 Data size: 544 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 544 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8 (type: binary) + value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8 (type: binary) Reducer 3 - Execution mode: llap - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: struct) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.cmv_mat_view_n6 - Write Type: DELETE - Reducer 4 Execution mode: llap Reduce Operator Tree: Group By Operator @@ -685,53 +637,11 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe 
- Reducer 6 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3, _col4 - Statistics: Num rows: 2 Data size: 248 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col3 (type: decimal(10,2)), (_col1 or _col4) (type: boolean) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 2 Data size: 240 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 2 Data size: 240 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: decimal(10,2)), _col2 (type: boolean) - Stage: Stage-3 + Stage: Stage-2 Dependency Collection Stage: Stage-0 - Move Operator - tables: - replace: false - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.cmv_mat_view_n6 - Write Type: DELETE - - Stage: Stage-4 - Stats Work - Basic Stats Work: - - Stage: Stage-6 - Materialized View Update - name: default.cmv_mat_view_n6 - update creation metadata: true - - Stage: Stage-1 Move Operator tables: replace: false @@ -742,7 +652,7 @@ STAGE PLANS: name: default.cmv_mat_view_n6 Write Type: INSERT - Stage: Stage-5 + Stage: Stage-3 Stats Work Basic Stats Work: Column Stats Desc: @@ -750,19 +660,20 @@ STAGE PLANS: Column Types: int, decimal(10,2) Table: default.cmv_mat_view_n6 + Stage: Stage-4 + Materialized View Update + name: default.cmv_mat_view_n6 + update creation metadata: true + PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD PREHOOK: Input: default@cmv_basetable_2_n3 PREHOOK: Input: default@cmv_basetable_n6 -PREHOOK: Input: default@cmv_mat_view_n6 -PREHOOK: Output: default@cmv_mat_view_n6 PREHOOK: Output: default@cmv_mat_view_n6 POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD POSTHOOK: Input: default@cmv_basetable_2_n3 POSTHOOK: Input: default@cmv_basetable_n6 -POSTHOOK: Input: default@cmv_mat_view_n6 -POSTHOOK: Output: default@cmv_mat_view_n6 POSTHOOK: Output: default@cmv_mat_view_n6 POSTHOOK: Lineage: cmv_mat_view_n6.a SIMPLE [(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int, comment:null), ] POSTHOOK: Lineage: cmv_mat_view_n6.c SIMPLE [(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2), comment:null), ] @@ -818,12 +729,14 @@ POSTHOOK: Input: default@cmv_basetable_2_n3 POSTHOOK: Input: default@cmv_basetable_n6 POSTHOOK: Input: default@cmv_mat_view_n6 #### A masked pattern was here #### +2 +2 +2 +2 3 3 3 3 -2 -2 PREHOOK: query: DELETE FROM cmv_basetable_2_n3 WHERE a=2 PREHOOK: type: QUERY PREHOOK: Input: default@cmv_basetable_2_n3 @@ -837,101 +750,70 @@ ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD PREHOOK: Input: default@cmv_basetable_2_n3 PREHOOK: Input: default@cmv_basetable_n6 -PREHOOK: Input: default@cmv_mat_view_n6 -PREHOOK: Output: default@cmv_mat_view_n6 PREHOOK: Output: default@cmv_mat_view_n6 POSTHOOK: query: EXPLAIN ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD POSTHOOK: Input: default@cmv_basetable_2_n3 POSTHOOK: Input: 
default@cmv_basetable_n6 -POSTHOOK: Input: default@cmv_mat_view_n6 -POSTHOOK: Output: default@cmv_mat_view_n6 POSTHOOK: Output: default@cmv_mat_view_n6 STAGE DEPENDENCIES: - Stage-2 is a root stage - Stage-3 depends on stages: Stage-2 - Stage-0 depends on stages: Stage-3 - Stage-4 depends on stages: Stage-0 - Stage-6 depends on stages: Stage-4, Stage-5 - Stage-1 depends on stages: Stage-3 - Stage-5 depends on stages: Stage-1 + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 depends on stages: Stage-3 STAGE PLANS: - Stage: Stage-2 + Stage: Stage-1 Tez #### A masked pattern was here #### Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE) - Reducer 3 <- Reducer 2 (SIMPLE_EDGE) - Reducer 4 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) - Reducer 6 <- Map 5 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) #### A masked pattern was here #### Vertices: Map 1 - Map Operator Tree: - TableScan - alias: default.cmv_mat_view_n6 - Statistics: Num rows: 6 Data size: 24 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: a (type: int), ROW__ID (type: struct) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 6 Data size: 480 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 6 Data size: 480 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: struct) - Execution mode: llap - LLAP IO: may be used (ACID table) - Map 5 Map Operator Tree: TableScan alias: cmv_basetable_n6 filterExpr: a is not null (type: boolean) - properties: - acid.fetch.deleted.rows TRUE - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: a is not null (type: boolean) - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: a (type: int), ROW__IS__DELETED (type: boolean) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE + expressions: a (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 5 Data size: 40 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: boolean) + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: llap LLAP IO: may be used (ACID table) - Map 7 + Map 4 Map Operator Tree: TableScan alias: cmv_basetable_2_n3 - filterExpr: ((ROW__ID.writeid > 3L) and (c > 10) and a is not null) (type: boolean) - properties: - acid.fetch.deleted.rows TRUE + filterExpr: ((c > 10) and a is not null) (type: boolean) Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((ROW__ID.writeid > 3L) and (c > 10) and a is not null) (type: boolean) - Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE + 
predicate: ((c > 10) and a is not null) (type: boolean) + Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: a (type: int), c (type: decimal(10,2)), ROW__IS__DELETED (type: boolean) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + expressions: a (type: int), c (type: decimal(10,2)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: decimal(10,2)), _col2 (type: boolean) + Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: decimal(10,2)) Execution mode: llap LLAP IO: may be used (ACID table) Reducer 2 @@ -939,74 +821,41 @@ STAGE PLANS: Reduce Operator Tree: Merge Join Operator condition map: - Right Outer Join 0 to 1 + Inner Join 0 to 1 keys: 0 _col0 (type: int) 1 _col0 (type: int) - nullSafes: [true] - outputColumnNames: _col1, _col2, _col3, _col4 - Statistics: Num rows: 10 Data size: 1960 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: _col4 (type: boolean) - Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: struct) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: struct) - null sort order: z - sort order: + - Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (not _col4) (type: boolean) - Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE + outputColumnNames: _col0, _col2 + Statistics: Num rows: 7 Data size: 812 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: int), _col2 (type: decimal(10,2)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 7 Data size: 812 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 7 Data size: 812 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.cmv_mat_view_n6 + Write Type: INSERT Select Operator - expressions: _col2 (type: int), _col3 (type: decimal(10,2)) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.cmv_mat_view_n6 - Write Type: INSERT - Select Operator - expressions: _col0 (type: int), _col1 (type: decimal(10,2)) - outputColumnNames: a, c - Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE - Group By 
Operator - aggregations: min(a), max(a), count(1), count(a), compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c) - minReductionHashAggr: 0.4 - mode: hash - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 + expressions: _col0 (type: int), _col1 (type: decimal(10,2)) + outputColumnNames: a, c + Statistics: Num rows: 7 Data size: 812 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: min(a), max(a), count(1), count(a), compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c) + minReductionHashAggr: 0.85714287 + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 + Statistics: Num rows: 1 Data size: 544 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: Statistics: Num rows: 1 Data size: 544 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 544 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8 (type: binary) + value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8 (type: binary) Reducer 3 - Execution mode: llap - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: struct) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.cmv_mat_view_n6 - Write Type: DELETE - Reducer 4 Execution mode: llap Reduce Operator Tree: Group By Operator @@ -1025,53 +874,11 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Reducer 6 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3, _col4 - Statistics: Num rows: 3 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col3 (type: decimal(10,2)), (_col1 or _col4) (type: boolean) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 3 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: decimal(10,2)), _col2 (type: boolean) - Stage: Stage-3 + Stage: Stage-2 Dependency Collection Stage: Stage-0 - Move Operator - tables: - replace: false - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: 
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.cmv_mat_view_n6 - Write Type: DELETE - - Stage: Stage-4 - Stats Work - Basic Stats Work: - - Stage: Stage-6 - Materialized View Update - name: default.cmv_mat_view_n6 - update creation metadata: true - - Stage: Stage-1 Move Operator tables: replace: false @@ -1082,7 +889,7 @@ STAGE PLANS: name: default.cmv_mat_view_n6 Write Type: INSERT - Stage: Stage-5 + Stage: Stage-3 Stats Work Basic Stats Work: Column Stats Desc: @@ -1090,19 +897,20 @@ STAGE PLANS: Column Types: int, decimal(10,2) Table: default.cmv_mat_view_n6 + Stage: Stage-4 + Materialized View Update + name: default.cmv_mat_view_n6 + update creation metadata: true + PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD PREHOOK: Input: default@cmv_basetable_2_n3 PREHOOK: Input: default@cmv_basetable_n6 -PREHOOK: Input: default@cmv_mat_view_n6 -PREHOOK: Output: default@cmv_mat_view_n6 PREHOOK: Output: default@cmv_mat_view_n6 POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD POSTHOOK: Input: default@cmv_basetable_2_n3 POSTHOOK: Input: default@cmv_basetable_n6 -POSTHOOK: Input: default@cmv_mat_view_n6 -POSTHOOK: Output: default@cmv_mat_view_n6 POSTHOOK: Output: default@cmv_mat_view_n6 POSTHOOK: Lineage: cmv_mat_view_n6.a SIMPLE [(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int, comment:null), ] POSTHOOK: Lineage: cmv_mat_view_n6.c SIMPLE [(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2), comment:null), ] @@ -1209,20 +1017,20 @@ STAGE PLANS: TableScan alias: cmv_basetable_n6 filterExpr: a is not null (type: boolean) - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: a is not null (type: boolean) - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int) outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: llap LLAP IO: may be used (ACID table) Map 4 @@ -1257,14 +1065,14 @@ STAGE PLANS: 0 _col0 (type: int) 1 _col0 (type: int) outputColumnNames: _col0, _col2 - Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: _col0 (type: int), _col2 (type: decimal(10,2)) outputColumnNames: _col0, _col1 - Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE 
table: input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat @@ -1274,10 +1082,10 @@ STAGE PLANS: Select Operator expressions: _col0 (type: int), _col1 (type: decimal(10,2)) outputColumnNames: a, c - Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: min(a), max(a), count(1), count(a), compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c) - minReductionHashAggr: 0.5 + minReductionHashAggr: 0.6666666 mode: hash outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 1 Data size: 544 Basic stats: COMPLETE Column stats: COMPLETE @@ -1402,6 +1210,8 @@ POSTHOOK: Input: default@cmv_mat_view_n6 3 3 1 +1 +1 PREHOOK: query: drop materialized view cmv_mat_view_n6 PREHOOK: type: DROP_MATERIALIZED_VIEW PREHOOK: Input: default@cmv_mat_view_n6 diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_6.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_6.q.out index baeed8afc974..2c794c918da5 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_6.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_6.q.out @@ -253,12 +253,12 @@ HiveProject(a=[$5], _o__c1=[CAST(CASE(IS NULL($1), $6, IS NULL($6), $1, +($6, $1 HiveTableScan(table=[[default, mat1]], table:alias=[default.mat1]) HiveProject(a=[$0], $f1=[$1], $f2=[$2], $f3=[$3]) HiveAggregate(group=[{0}], agg#0=[SUM($1)], agg#1=[SUM($2)], agg#2=[SUM($3)]) - HiveProject(a=[$0], $f3=[CASE(OR($2, $5), *(-1, $1), $1)], $f4=[CASE(OR($2, $5), *(-1, CASE(IS NULL($1), 0, 1)), CASE(IS NULL($1), 0, 1))], $f5=[CASE(OR($2, $5), -1, 1)]) - HiveJoin(condition=[AND(=($0, $4), OR($3, $6))], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(a=[$0], b=[$1], ROW__IS__DELETED=[$6], <=[<(3, $5.writeid)]) + HiveProject(a=[$0], $f4=[CASE(OR($3, $7), *(-1, $1), $1)], $f5=[CASE(OR($3, $7), *(-1, CASE(IS NULL($1), 0, 1)), CASE(IS NULL($1), 0, 1))], $f6=[CASE(OR($3, $7), -1, 1)]) + HiveJoin(condition=[AND(=($0, $5), OR(AND(NOT($3), NOT($7)), AND(NOT($4), NOT($8))), OR(<(3, $2.writeid), <(3, $6.writeid)))], joinType=[inner], algorithm=[none], cost=[not available]) + HiveProject(a=[$0], b=[$1], ROW__ID=[$5], _deleted=[AND($6, <(3, $5.writeid))], _inserted=[AND(<(3, $5.writeid), NOT($6))]) HiveFilter(condition=[IS NOT NULL($0)]) HiveTableScan(table=[[default, t1]], table:alias=[t1]) - HiveProject(a=[$0], ROW__IS__DELETED=[$5], <=[<(3, $4.writeid)]) + HiveProject(a=[$0], ROW__ID=[$4], _deleted=[AND($5, <(3, $4.writeid))], _inserted=[AND(<(3, $4.writeid), NOT($5))]) HiveFilter(condition=[IS NOT NULL($0)]) HiveTableScan(table=[[default, t2]], table:alias=[t2]) @@ -335,16 +335,16 @@ STAGE PLANS: predicate: a is not null (type: boolean) Statistics: Num rows: 9 Data size: 837 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: a (type: char(15)), ROW__IS__DELETED (type: boolean), (ROW__ID.writeid > 3L) (type: boolean) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 9 Data size: 909 Basic stats: COMPLETE Column stats: COMPLETE + expressions: a (type: char(15)), ROW__ID (type: struct), (ROW__IS__DELETED and (ROW__ID.writeid > 3L)) (type: boolean), ((ROW__ID.writeid > 3L) and (not ROW__IS__DELETED)) (type: boolean) + 
outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 9 Data size: 1593 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: char(15)) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: char(15)) - Statistics: Num rows: 9 Data size: 909 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: boolean), _col2 (type: boolean) + Statistics: Num rows: 9 Data size: 1593 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: struct), _col2 (type: boolean), _col3 (type: boolean) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Map 7 @@ -359,16 +359,16 @@ STAGE PLANS: predicate: a is not null (type: boolean) Statistics: Num rows: 7 Data size: 1211 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: a (type: char(15)), b (type: decimal(7,2)), ROW__IS__DELETED (type: boolean), (ROW__ID.writeid > 3L) (type: boolean) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 7 Data size: 1267 Basic stats: COMPLETE Column stats: COMPLETE + expressions: a (type: char(15)), b (type: decimal(7,2)), ROW__ID (type: struct), (ROW__IS__DELETED and (ROW__ID.writeid > 3L)) (type: boolean), ((ROW__ID.writeid > 3L) and (not ROW__IS__DELETED)) (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 7 Data size: 1799 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: char(15)) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: char(15)) - Statistics: Num rows: 7 Data size: 1267 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: decimal(7,2)), _col2 (type: boolean), _col3 (type: boolean) + Statistics: Num rows: 7 Data size: 1799 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: decimal(7,2)), _col2 (type: struct), _col3 (type: boolean), _col4 (type: boolean) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Reducer 2 @@ -382,7 +382,7 @@ STAGE PLANS: 1 _col0 (type: char(15)) nullSafes: [true] outputColumnNames: _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 - Statistics: Num rows: 5 Data size: 2033 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 746 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col4 and ((_col3 is null and (_col9 > 0L)) or (((_col9 + _col3) > 0) and _col3 is not null))) (type: boolean) Statistics: Num rows: 1 Data size: 429 Basic stats: COMPLETE Column stats: COMPLETE @@ -550,26 +550,26 @@ STAGE PLANS: keys: 0 _col0 (type: char(15)) 1 _col0 (type: char(15)) - outputColumnNames: _col0, _col1, _col2, _col3, _col5, _col6 - residual filter predicates: {(_col3 or _col6)} - Statistics: Num rows: 6 Data size: 1102 Basic stats: COMPLETE Column stats: COMPLETE + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col6, _col7, _col8 + residual filter predicates: {(((not _col3) and (not _col7)) or ((not _col4) and (not _col8)))} {((_col2.writeid > 3L) or (_col6.writeid > 3L))} + Statistics: Num rows: 1 Data size: 373 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: char(15)), if((_col2 or _col5), (-1 * _col1), _col1) (type: decimal(17,2)), if((_col2 or _col5), (-1 * if(_col1 is null, 0, 1)), if(_col1 is null, 0, 1)) (type: int), if((_col2 or _col5), -1, 1) (type: int) + expressions: _col0 (type: char(15)), if((_col3 or _col7), 
(-1 * _col1), _col1) (type: decimal(17,2)), if((_col3 or _col7), (-1 * if(_col1 is null, 0, 1)), if(_col1 is null, 0, 1)) (type: int), if((_col3 or _col7), -1, 1) (type: int) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 6 Data size: 1102 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 373 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: sum(_col1), sum(_col2), sum(_col3) keys: _col0 (type: char(15)) minReductionHashAggr: 0.4 mode: hash outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 5 Data size: 1105 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 221 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: char(15)) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: char(15)) - Statistics: Num rows: 5 Data size: 1105 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 221 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: decimal(27,2)), _col2 (type: bigint), _col3 (type: bigint) Reducer 9 Execution mode: vectorized, llap @@ -579,13 +579,13 @@ STAGE PLANS: keys: KEY._col0 (type: char(15)) mode: mergepartial outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 5 Data size: 1105 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 221 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: char(15)) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: char(15)) - Statistics: Num rows: 5 Data size: 1105 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 221 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: decimal(27,2)), _col2 (type: bigint), _col3 (type: bigint) Stage: Stage-5 @@ -674,14 +674,14 @@ POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 POSTHOOK: Output: default@mat1 POSTHOOK: Output: default@mat1 -POSTHOOK: Lineage: mat1._c1 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c1, type:decimal(17,2), comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:b, type:decimal(7,2), comment:null), ] -POSTHOOK: Lineage: mat1._c1 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c1, type:decimal(17,2), comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:b, type:decimal(7,2), comment:null), ] -POSTHOOK: Lineage: mat1._c2 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c2, type:bigint, comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:b, type:decimal(7,2), comment:null), ] -POSTHOOK: Lineage: mat1._c2 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c2, type:bigint, comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:b, type:decimal(7,2), comment:null), ] -POSTHOOK: Lineage: mat1._c3 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c1, type:decimal(17,2), comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, 
type:boolean, comment:), (t1)t1.FieldSchema(name:b, type:decimal(7,2), comment:null), (mat1)default.mat1.FieldSchema(name:_c2, type:bigint, comment:null), ] -POSTHOOK: Lineage: mat1._c3 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c1, type:decimal(17,2), comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:b, type:decimal(7,2), comment:null), (mat1)default.mat1.FieldSchema(name:_c2, type:bigint, comment:null), ] -POSTHOOK: Lineage: mat1._c4 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c4, type:bigint, comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), ] -POSTHOOK: Lineage: mat1._c4 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c4, type:bigint, comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), ] +POSTHOOK: Lineage: mat1._c1 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c1, type:decimal(17,2), comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:ROW__ID, type:struct, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__ID, type:struct, comment:), (t1)t1.FieldSchema(name:b, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: mat1._c1 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c1, type:decimal(17,2), comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:ROW__ID, type:struct, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__ID, type:struct, comment:), (t1)t1.FieldSchema(name:b, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: mat1._c2 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c2, type:bigint, comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:ROW__ID, type:struct, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__ID, type:struct, comment:), (t1)t1.FieldSchema(name:b, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: mat1._c2 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c2, type:bigint, comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:ROW__ID, type:struct, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__ID, type:struct, comment:), (t1)t1.FieldSchema(name:b, type:decimal(7,2), comment:null), ] +POSTHOOK: Lineage: mat1._c3 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c1, type:decimal(17,2), comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:ROW__ID, type:struct, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__ID, type:struct, comment:), (t1)t1.FieldSchema(name:b, type:decimal(7,2), comment:null), (mat1)default.mat1.FieldSchema(name:_c2, type:bigint, comment:null), ] +POSTHOOK: Lineage: mat1._c3 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c1, type:decimal(17,2), comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:ROW__ID, type:struct, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), 
(t2)t2.FieldSchema(name:ROW__ID, type:struct, comment:), (t1)t1.FieldSchema(name:b, type:decimal(7,2), comment:null), (mat1)default.mat1.FieldSchema(name:_c2, type:bigint, comment:null), ] +POSTHOOK: Lineage: mat1._c4 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c4, type:bigint, comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:ROW__ID, type:struct, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: mat1._c4 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c4, type:bigint, comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:ROW__ID, type:struct, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__ID, type:struct, comment:), ] POSTHOOK: Lineage: mat1.a SIMPLE [(t1)t1.FieldSchema(name:a, type:char(15), comment:null), ] POSTHOOK: Lineage: mat1.a SIMPLE [(t1)t1.FieldSchema(name:a, type:char(15), comment:null), ] PREHOOK: query: explain cbo diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_6_aggr_2joins.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_6_aggr_2joins.q.out new file mode 100644 index 000000000000..059859bbc942 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_6_aggr_2joins.q.out @@ -0,0 +1,600 @@ +PREHOOK: query: create table cmv_basetable_n6 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@cmv_basetable_n6 +POSTHOOK: query: create table cmv_basetable_n6 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@cmv_basetable_n6 +PREHOOK: query: insert into cmv_basetable_n6 values +(1, 'alfred', 10.30, 2), +(1, 'charlie', 20.30, 2), +(2, 'zoe', 100.30, 2) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@cmv_basetable_n6 +POSTHOOK: query: insert into cmv_basetable_n6 values +(1, 'alfred', 10.30, 2), +(1, 'charlie', 20.30, 2), +(2, 'zoe', 100.30, 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@cmv_basetable_n6 +POSTHOOK: Lineage: cmv_basetable_n6.a SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_n6.b SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_n6.c SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_n6.d SCRIPT [] +PREHOOK: query: create table cmv_basetable_2_n3 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@cmv_basetable_2_n3 +POSTHOOK: query: create table cmv_basetable_2_n3 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@cmv_basetable_2_n3 +PREHOOK: query: insert into cmv_basetable_2_n3 values +(1, 'bob', 30.30, 2), +(1, 'bonnie', 40.30, 2), +(2, 'joe', 130.30, 2) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@cmv_basetable_2_n3 +POSTHOOK: query: insert into cmv_basetable_2_n3 values +(1, 'bob', 30.30, 2), +(1, 'bonnie', 
40.30, 2), +(2, 'joe', 130.30, 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@cmv_basetable_2_n3 +POSTHOOK: Lineage: cmv_basetable_2_n3.a SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_2_n3.b SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_2_n3.c SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_2_n3.d SCRIPT [] +PREHOOK: query: SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b, count(*) +FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +group by cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_basetable_2_n3 +PREHOOK: Input: default@cmv_basetable_n6 +#### A masked pattern was here #### +POSTHOOK: query: SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b, count(*) +FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +group by cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_basetable_2_n3 +POSTHOOK: Input: default@cmv_basetable_n6 +#### A masked pattern was here #### +2 130.30 joe 1 +1 30.30 bob 2 +1 40.30 bonnie 2 +PREHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view_n6 TBLPROPERTIES ('transactional'='true') AS +SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b, count(*) +FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +group by cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b +PREHOOK: type: CREATE_MATERIALIZED_VIEW +PREHOOK: Input: default@cmv_basetable_2_n3 +PREHOOK: Input: default@cmv_basetable_n6 +PREHOOK: Output: database:default +PREHOOK: Output: default@cmv_mat_view_n6 +POSTHOOK: query: CREATE MATERIALIZED VIEW cmv_mat_view_n6 TBLPROPERTIES ('transactional'='true') AS +SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b, count(*) +FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +group by cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b +POSTHOOK: type: CREATE_MATERIALIZED_VIEW +POSTHOOK: Input: default@cmv_basetable_2_n3 +POSTHOOK: Input: default@cmv_basetable_n6 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@cmv_mat_view_n6 +POSTHOOK: Lineage: cmv_mat_view_n6._c3 EXPRESSION [(cmv_basetable_n6)cmv_basetable_n6.null, (cmv_basetable_2_n3)cmv_basetable_2_n3.null, ] +POSTHOOK: Lineage: cmv_mat_view_n6.a SIMPLE [(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int, comment:null), ] +POSTHOOK: Lineage: cmv_mat_view_n6.b SIMPLE [(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:b, type:varchar(256), comment:null), ] +POSTHOOK: Lineage: cmv_mat_view_n6.c SIMPLE [(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2), comment:null), ] +PREHOOK: query: insert into cmv_basetable_n6 values +(1, 'kevin', 50.30, 2) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@cmv_basetable_n6 +POSTHOOK: query: insert into cmv_basetable_n6 values +(1, 'kevin', 50.30, 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@cmv_basetable_n6 +POSTHOOK: Lineage: cmv_basetable_n6.a SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_n6.b SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_n6.c SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_n6.d SCRIPT [] +PREHOOK: query: DELETE FROM cmv_basetable_2_n3 WHERE b = 'bonnie' +PREHOOK: type: QUERY 
+PREHOOK: Input: default@cmv_basetable_2_n3 +PREHOOK: Output: default@cmv_basetable_2_n3 +POSTHOOK: query: DELETE FROM cmv_basetable_2_n3 WHERE b = 'bonnie' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_basetable_2_n3 +POSTHOOK: Output: default@cmv_basetable_2_n3 +PREHOOK: query: EXPLAIN CBO +ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD +PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +PREHOOK: Input: default@cmv_basetable_2_n3 +PREHOOK: Input: default@cmv_basetable_n6 +PREHOOK: Input: default@cmv_mat_view_n6 +PREHOOK: Output: default@cmv_mat_view_n6 +PREHOOK: Output: default@cmv_mat_view_n6 +POSTHOOK: query: EXPLAIN CBO +ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD +POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +POSTHOOK: Input: default@cmv_basetable_2_n3 +POSTHOOK: Input: default@cmv_basetable_n6 +POSTHOOK: Input: default@cmv_mat_view_n6 +POSTHOOK: Output: default@cmv_mat_view_n6 +POSTHOOK: Output: default@cmv_mat_view_n6 +CBO PLAN: +HiveProject(a0=[$5], c0=[$6], b0=[$7], $f3=[CASE(IS NULL($3), $8, +($8, $3))]) + HiveFilter(condition=[OR(AND($4, OR(AND(IS NULL($3), =($8, 0)), AND(=(+($8, $3), 0), IS NOT NULL($3)))), AND(IS NULL($4), OR(AND(IS NULL($3), >($8, 0)), AND(>(+($8, $3), 0), IS NOT NULL($3)))), AND($4, OR(AND(IS NULL($3), >($8, 0)), AND(>(+($8, $3), 0), IS NOT NULL($3)))))]) + HiveJoin(condition=[AND(IS NOT DISTINCT FROM($0, $5), IS NOT DISTINCT FROM($1, $6), IS NOT DISTINCT FROM($2, $7))], joinType=[right], algorithm=[none], cost=[not available]) + HiveProject(a=[$0], c=[$1], b=[$2], _c3=[$3], $f4=[true]) + HiveTableScan(table=[[default, cmv_mat_view_n6]], table:alias=[default.cmv_mat_view_n6]) + HiveProject(a=[$0], c0=[$1], b0=[$2], $f3=[$3]) + HiveAggregate(group=[{0, 1, 2}], agg#0=[SUM($3)]) + HiveProject(a=[$0], c0=[$6], b0=[$5], $f5=[CASE(OR($2, $8), -1, 1)]) + HiveJoin(condition=[AND(=($0, $4), OR(AND(NOT($2), NOT($8)), AND(NOT($3), NOT($9))), OR(<(1, $7.writeid), <(1, $1.writeid)))], joinType=[inner], algorithm=[none], cost=[not available]) + HiveProject(a=[$0], ROW__ID=[$6], _deleted=[AND($7, <(1, $6.writeid))], _inserted=[AND(<(1, $6.writeid), NOT($7))]) + HiveFilter(condition=[IS NOT NULL($0)]) + HiveTableScan(table=[[default, cmv_basetable_n6]], table:alias=[cmv_basetable_n6]) + HiveProject(a=[$0], b=[$1], c=[$2], ROW__ID=[$6], _deleted=[AND($7, <(1, $6.writeid))], _inserted=[AND(<(1, $6.writeid), NOT($7))]) + HiveFilter(condition=[IS NOT NULL($0)]) + HiveTableScan(table=[[default, cmv_basetable_2_n3]], table:alias=[cmv_basetable_2_n3]) + +PREHOOK: query: EXPLAIN +ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD +PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +PREHOOK: Input: default@cmv_basetable_2_n3 +PREHOOK: Input: default@cmv_basetable_n6 +PREHOOK: Input: default@cmv_mat_view_n6 +PREHOOK: Output: default@cmv_mat_view_n6 +PREHOOK: Output: default@cmv_mat_view_n6 +POSTHOOK: query: EXPLAIN +ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD +POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +POSTHOOK: Input: default@cmv_basetable_2_n3 +POSTHOOK: Input: default@cmv_basetable_n6 +POSTHOOK: Input: default@cmv_mat_view_n6 +POSTHOOK: Output: default@cmv_mat_view_n6 +POSTHOOK: Output: default@cmv_mat_view_n6 +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-5 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-5 + Stage-6 depends on stages: Stage-0 + Stage-10 depends on stages: Stage-6, Stage-7, Stage-8, Stage-9 + Stage-1 depends on stages: Stage-5 + Stage-7 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-5 + Stage-8 depends on 
stages: Stage-2 + Stage-3 depends on stages: Stage-5 + Stage-9 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 9 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) + Reducer 4 <- Reducer 2 (SIMPLE_EDGE) + Reducer 5 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) + Reducer 6 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) + Reducer 8 <- Map 10 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE) + Reducer 9 <- Reducer 8 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: default.cmv_mat_view_n6 + Statistics: Num rows: 3 Data size: 636 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), c (type: decimal(10,2)), b (type: varchar(256)), _c3 (type: bigint), true (type: boolean), ROW__ID (type: struct) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 3 Data size: 876 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: decimal(10,2)), _col2 (type: varchar(256)) + null sort order: zzz + sort order: +++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2)), _col2 (type: varchar(256)) + Statistics: Num rows: 3 Data size: 876 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col3 (type: bigint), _col4 (type: boolean), _col5 (type: struct) + Execution mode: vectorized, llap + LLAP IO: may be used (ACID table) + Map 10 + Map Operator Tree: + TableScan + alias: cmv_basetable_2_n3 + filterExpr: a is not null (type: boolean) + properties: + acid.fetch.deleted.rows TRUE + Statistics: Num rows: 2 Data size: 408 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: a is not null (type: boolean) + Statistics: Num rows: 2 Data size: 408 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), b (type: varchar(256)), c (type: decimal(10,2)), ROW__ID (type: struct), (ROW__IS__DELETED and (ROW__ID.writeid > 1L)) (type: boolean), ((ROW__ID.writeid > 1L) and (not ROW__IS__DELETED)) (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + Statistics: Num rows: 2 Data size: 576 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 576 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: varchar(256)), _col2 (type: decimal(10,2)), _col3 (type: struct), _col4 (type: boolean), _col5 (type: boolean) + Execution mode: vectorized, llap + LLAP IO: may be used (ACID table) + Map 7 + Map Operator Tree: + TableScan + alias: cmv_basetable_n6 + filterExpr: a is not null (type: boolean) + properties: + acid.fetch.deleted.rows TRUE + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: a is not null (type: boolean) + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), ROW__ID (type: struct), (ROW__IS__DELETED and (ROW__ID.writeid > 1L)) (type: boolean), ((ROW__ID.writeid > 1L) and (not ROW__IS__DELETED)) (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 4 Data size: 352 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: 
int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 4 Data size: 352 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: struct), _col2 (type: boolean), _col3 (type: boolean) + Execution mode: vectorized, llap + LLAP IO: may be used (ACID table) + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Merge Join Operator + condition map: + Right Outer Join 0 to 1 + keys: + 0 _col0 (type: int), _col1 (type: decimal(10,2)), _col2 (type: varchar(256)) + 1 _col0 (type: int), _col1 (type: decimal(10,2)), _col2 (type: varchar(256)) + nullSafes: [true, true, true] + outputColumnNames: _col3, _col4, _col5, _col6, _col7, _col8, _col9 + Statistics: Num rows: 2 Data size: 600 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (_col4 and ((_col3 is null and (_col9 > 0L)) or (((_col9 + _col3) > 0) and _col3 is not null))) (type: boolean) + Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col5 (type: struct) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: struct) + null sort order: z + sort order: + + Map-reduce partition columns: UDFToInteger(_col0) (type: int) + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (_col4 and ((_col3 is null and (_col9 = 0L)) or (((_col9 + _col3) = 0) and _col3 is not null))) (type: boolean) + Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col5 (type: struct) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: struct) + null sort order: z + sort order: + + Map-reduce partition columns: UDFToInteger(_col0) (type: int) + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (_col4 and ((_col3 is null and (_col9 > 0L)) or (((_col9 + _col3) > 0) and _col3 is not null))) (type: boolean) + Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col6 (type: int), _col7 (type: decimal(10,2)), _col8 (type: varchar(256)), if(_col3 is null, _col9, (_col9 + _col3)) (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.cmv_mat_view_n6 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: decimal(10,2)), _col2 (type: varchar(256)), _col3 (type: bigint) + outputColumnNames: a, c, b, _c3 + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: min(a), max(a), count(1), count(a), compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c), max(length(b)), avg(COALESCE(length(b),0)), count(b), compute_bit_vector_hll(b), min(_c3), max(_c3), count(_c3), compute_bit_vector_hll(_c3) + 
minReductionHashAggr: 0.4 + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16 + Statistics: Num rows: 1 Data size: 944 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 944 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8 (type: binary), _col9 (type: int), _col10 (type: struct), _col11 (type: bigint), _col12 (type: binary), _col13 (type: bigint), _col14 (type: bigint), _col15 (type: bigint), _col16 (type: binary) + Filter Operator + predicate: (_col4 is null and ((_col3 is null and (_col9 > 0L)) or (((_col9 + _col3) > 0) and _col3 is not null))) (type: boolean) + Statistics: Num rows: 1 Data size: 300 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col6 (type: int), _col7 (type: decimal(10,2)), _col8 (type: varchar(256)), if(_col3 is null, _col9, (_col9 + _col3)) (type: bigint) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.cmv_mat_view_n6 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: decimal(10,2)), _col2 (type: varchar(256)), _col3 (type: bigint) + outputColumnNames: a, c, b, _c3 + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: min(a), max(a), count(1), count(a), compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c), max(length(b)), avg(COALESCE(length(b),0)), count(b), compute_bit_vector_hll(b), min(_c3), max(_c3), count(_c3), compute_bit_vector_hll(_c3) + minReductionHashAggr: 0.4 + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16 + Statistics: Num rows: 1 Data size: 944 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 944 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8 (type: binary), _col9 (type: int), _col10 (type: struct), _col11 (type: bigint), _col12 (type: binary), _col13 (type: bigint), _col14 (type: bigint), _col15 (type: bigint), _col16 (type: binary) + Reducer 3 + Execution mode: vectorized, llap + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: struct) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + 
output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.cmv_mat_view_n6 + Write Type: DELETE + Reducer 4 + Execution mode: vectorized, llap + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: struct) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.cmv_mat_view_n6 + Write Type: DELETE + Reducer 5 + Execution mode: vectorized, llap + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector_hll(VALUE._col4), min(VALUE._col5), max(VALUE._col6), count(VALUE._col7), compute_bit_vector_hll(VALUE._col8), max(VALUE._col9), avg(VALUE._col10), count(VALUE._col11), compute_bit_vector_hll(VALUE._col12), min(VALUE._col13), max(VALUE._col14), count(VALUE._col15), compute_bit_vector_hll(VALUE._col16) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16 + Statistics: Num rows: 1 Data size: 876 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 'LONG' (type: string), UDFToLong(_col0) (type: bigint), UDFToLong(_col1) (type: bigint), (_col2 - _col3) (type: bigint), COALESCE(ndv_compute_bit_vector(_col4),0) (type: bigint), _col4 (type: binary), 'DECIMAL' (type: string), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), (_col2 - _col7) (type: bigint), COALESCE(ndv_compute_bit_vector(_col8),0) (type: bigint), _col8 (type: binary), 'STRING' (type: string), UDFToLong(COALESCE(_col9,0)) (type: bigint), COALESCE(_col10,0) (type: double), (_col2 - _col11) (type: bigint), COALESCE(ndv_compute_bit_vector(_col12),0) (type: bigint), _col12 (type: binary), 'LONG' (type: string), _col13 (type: bigint), _col14 (type: bigint), (_col2 - _col15) (type: bigint), COALESCE(ndv_compute_bit_vector(_col16),0) (type: bigint), _col16 (type: binary) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23 + Statistics: Num rows: 1 Data size: 1269 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 1269 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 6 + Execution mode: vectorized, llap + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector_hll(VALUE._col4), min(VALUE._col5), max(VALUE._col6), count(VALUE._col7), compute_bit_vector_hll(VALUE._col8), max(VALUE._col9), avg(VALUE._col10), count(VALUE._col11), compute_bit_vector_hll(VALUE._col12), min(VALUE._col13), max(VALUE._col14), count(VALUE._col15), compute_bit_vector_hll(VALUE._col16) + mode: mergepartial + outputColumnNames: 
_col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16 + Statistics: Num rows: 1 Data size: 876 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 'LONG' (type: string), UDFToLong(_col0) (type: bigint), UDFToLong(_col1) (type: bigint), (_col2 - _col3) (type: bigint), COALESCE(ndv_compute_bit_vector(_col4),0) (type: bigint), _col4 (type: binary), 'DECIMAL' (type: string), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), (_col2 - _col7) (type: bigint), COALESCE(ndv_compute_bit_vector(_col8),0) (type: bigint), _col8 (type: binary), 'STRING' (type: string), UDFToLong(COALESCE(_col9,0)) (type: bigint), COALESCE(_col10,0) (type: double), (_col2 - _col11) (type: bigint), COALESCE(ndv_compute_bit_vector(_col12),0) (type: bigint), _col12 (type: binary), 'LONG' (type: string), _col13 (type: bigint), _col14 (type: bigint), (_col2 - _col15) (type: bigint), COALESCE(ndv_compute_bit_vector(_col16),0) (type: bigint), _col16 (type: binary) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23 + Statistics: Num rows: 1 Data size: 1269 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 1269 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 8 + Execution mode: llap + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col5, _col6, _col7, _col8, _col9 + residual filter predicates: {(((not _col2) and (not _col8)) or ((not _col3) and (not _col9)))} {((_col7.writeid > 1L) or (_col1.writeid > 1L))} + Statistics: Num rows: 1 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: int), _col6 (type: decimal(10,2)), _col5 (type: varchar(256)), if((_col2 or _col8), -1, 1) (type: int) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: sum(_col3) + keys: _col0 (type: int), _col1 (type: decimal(10,2)), _col2 (type: varchar(256)) + minReductionHashAggr: 0.4 + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: decimal(10,2)), _col2 (type: varchar(256)) + null sort order: zzz + sort order: +++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2)), _col2 (type: varchar(256)) + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col3 (type: bigint) + Reducer 9 + Execution mode: vectorized, llap + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0) + keys: KEY._col0 (type: int), KEY._col1 (type: decimal(10,2)), KEY._col2 (type: varchar(256)) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key 
expressions: _col0 (type: int), _col1 (type: decimal(10,2)), _col2 (type: varchar(256)) + null sort order: zzz + sort order: +++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2)), _col2 (type: varchar(256)) + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col3 (type: bigint) + + Stage: Stage-5 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.cmv_mat_view_n6 + Write Type: DELETE + + Stage: Stage-6 + Stats Work + Basic Stats Work: + + Stage: Stage-10 + Materialized View Update + name: default.cmv_mat_view_n6 + update creation metadata: true + + Stage: Stage-1 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.cmv_mat_view_n6 + Write Type: DELETE + + Stage: Stage-7 + Stats Work + Basic Stats Work: + + Stage: Stage-2 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.cmv_mat_view_n6 + Write Type: INSERT + + Stage: Stage-8 + Stats Work + Basic Stats Work: + + Stage: Stage-3 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.cmv_mat_view_n6 + Write Type: INSERT + + Stage: Stage-9 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: a, c, b, _c3 + Column Types: int, decimal(10,2), varchar(256), bigint + Table: default.cmv_mat_view_n6 + +PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD +PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +PREHOOK: Input: default@cmv_basetable_2_n3 +PREHOOK: Input: default@cmv_basetable_n6 +PREHOOK: Input: default@cmv_mat_view_n6 +PREHOOK: Output: default@cmv_mat_view_n6 +PREHOOK: Output: default@cmv_mat_view_n6 +POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD +POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +POSTHOOK: Input: default@cmv_basetable_2_n3 +POSTHOOK: Input: default@cmv_basetable_n6 +POSTHOOK: Input: default@cmv_mat_view_n6 +POSTHOOK: Output: default@cmv_mat_view_n6 +POSTHOOK: Output: default@cmv_mat_view_n6 +POSTHOOK: Lineage: cmv_mat_view_n6._c3 EXPRESSION [(cmv_mat_view_n6)default.cmv_mat_view_n6.FieldSchema(name:_c3, type:bigint, comment:null), (cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:ROW__ID, type:struct, comment:), (cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: cmv_mat_view_n6._c3 EXPRESSION [(cmv_mat_view_n6)default.cmv_mat_view_n6.FieldSchema(name:_c3, type:bigint, comment:null), (cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:ROW__ID, 
type:struct, comment:), (cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: cmv_mat_view_n6.a SIMPLE [(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int, comment:null), ] +POSTHOOK: Lineage: cmv_mat_view_n6.a SIMPLE [(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int, comment:null), ] +POSTHOOK: Lineage: cmv_mat_view_n6.b SIMPLE [(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:b, type:varchar(256), comment:null), ] +POSTHOOK: Lineage: cmv_mat_view_n6.b SIMPLE [(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:b, type:varchar(256), comment:null), ] +POSTHOOK: Lineage: cmv_mat_view_n6.c SIMPLE [(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2), comment:null), ] +POSTHOOK: Lineage: cmv_mat_view_n6.c SIMPLE [(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2), comment:null), ] +PREHOOK: query: select * from cmv_mat_view_n6 +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_mat_view_n6 +#### A masked pattern was here #### +POSTHOOK: query: select * from cmv_mat_view_n6 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_mat_view_n6 +#### A masked pattern was here #### +2 130.30 joe 1 +1 30.30 bob 3 +PREHOOK: query: drop materialized view cmv_mat_view_n6 +PREHOOK: type: DROP_MATERIALIZED_VIEW +PREHOOK: Input: default@cmv_mat_view_n6 +PREHOOK: Output: default@cmv_mat_view_n6 +POSTHOOK: query: drop materialized view cmv_mat_view_n6 +POSTHOOK: type: DROP_MATERIALIZED_VIEW +POSTHOOK: Input: default@cmv_mat_view_n6 +POSTHOOK: Output: default@cmv_mat_view_n6 +PREHOOK: query: SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b, count(*) +FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +group by cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_basetable_2_n3 +PREHOOK: Input: default@cmv_basetable_n6 +#### A masked pattern was here #### +POSTHOOK: query: SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b, count(*) +FROM cmv_basetable_n6 JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +group by cmv_basetable_n6.a, cmv_basetable_2_n3.c, cmv_basetable_2_n3.b +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_basetable_2_n3 +POSTHOOK: Input: default@cmv_basetable_n6 +#### A masked pattern was here #### +2 130.30 joe 1 +1 30.30 bob 3 diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_6_aggr_3joins.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_6_aggr_3joins.q.out new file mode 100644 index 000000000000..163286b1ff49 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_6_aggr_3joins.q.out @@ -0,0 +1,1208 @@ +PREHOOK: query: create table cmv_basetable_n6 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@cmv_basetable_n6 +POSTHOOK: query: create table cmv_basetable_n6 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@cmv_basetable_n6 +PREHOOK: query: insert into cmv_basetable_n6 values +(1, 'alfred', 10.30, 2), 
+(1, 'charlie', 20.30, 2), +(2, 'zoe', 100.30, 2) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@cmv_basetable_n6 +POSTHOOK: query: insert into cmv_basetable_n6 values +(1, 'alfred', 10.30, 2), +(1, 'charlie', 20.30, 2), +(2, 'zoe', 100.30, 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@cmv_basetable_n6 +POSTHOOK: Lineage: cmv_basetable_n6.a SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_n6.b SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_n6.c SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_n6.d SCRIPT [] +PREHOOK: query: create table cmv_basetable_2_n3 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@cmv_basetable_2_n3 +POSTHOOK: query: create table cmv_basetable_2_n3 (a int, b varchar(256), c decimal(10,2), d int) stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@cmv_basetable_2_n3 +PREHOOK: query: insert into cmv_basetable_2_n3 values +(1, 'bob', 30.30, 2), +(1, 'bonnie', 40.30, 2), +(2, 'joe', 130.30, 2) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@cmv_basetable_2_n3 +POSTHOOK: query: insert into cmv_basetable_2_n3 values +(1, 'bob', 30.30, 2), +(1, 'bonnie', 40.30, 2), +(2, 'joe', 130.30, 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@cmv_basetable_2_n3 +POSTHOOK: Lineage: cmv_basetable_2_n3.a SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_2_n3.b SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_2_n3.c SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_2_n3.d SCRIPT [] +PREHOOK: query: create table t3 (a int, b varchar(256), c decimal(10,2)) stored as orc TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t3 +POSTHOOK: query: create table t3 (a int, b varchar(256), c decimal(10,2)) stored as orc TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t3 +PREHOOK: query: insert into t3 values +(1, 'foo', 30.30), +(1, 'bar', 30.30), +(2, 'bar', 30.30) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@t3 +POSTHOOK: query: insert into t3 values +(1, 'foo', 30.30), +(1, 'bar', 30.30), +(2, 'bar', 30.30) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@t3 +POSTHOOK: Lineage: t3.a SCRIPT [] +POSTHOOK: Lineage: t3.b SCRIPT [] +POSTHOOK: Lineage: t3.c SCRIPT [] +PREHOOK: query: CREATE MATERIALIZED VIEW mat1 TBLPROPERTIES ('transactional'='true') AS +SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c, count(*) +FROM cmv_basetable_n6 +JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +JOIN t3 ON (t3.a = cmv_basetable_2_n3.a) +WHERE cmv_basetable_n6.c > 10 OR cmv_basetable_2_n3.c > 10 +group by cmv_basetable_n6.a, cmv_basetable_2_n3.c +PREHOOK: type: CREATE_MATERIALIZED_VIEW +PREHOOK: Input: default@cmv_basetable_2_n3 +PREHOOK: Input: default@cmv_basetable_n6 +PREHOOK: Input: default@t3 +PREHOOK: Output: database:default +PREHOOK: Output: default@mat1 +POSTHOOK: query: CREATE MATERIALIZED VIEW mat1 TBLPROPERTIES ('transactional'='true') AS +SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c, count(*) +FROM cmv_basetable_n6 +JOIN 
cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +JOIN t3 ON (t3.a = cmv_basetable_2_n3.a) +WHERE cmv_basetable_n6.c > 10 OR cmv_basetable_2_n3.c > 10 +group by cmv_basetable_n6.a, cmv_basetable_2_n3.c +POSTHOOK: type: CREATE_MATERIALIZED_VIEW +POSTHOOK: Input: default@cmv_basetable_2_n3 +POSTHOOK: Input: default@cmv_basetable_n6 +POSTHOOK: Input: default@t3 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@mat1 +POSTHOOK: Lineage: mat1._c2 EXPRESSION [(cmv_basetable_2_n3)cmv_basetable_2_n3.null, (t3)t3.null, (cmv_basetable_n6)cmv_basetable_n6.null, ] +POSTHOOK: Lineage: mat1.a SIMPLE [(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int, comment:null), ] +POSTHOOK: Lineage: mat1.c SIMPLE [(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2), comment:null), ] +PREHOOK: query: insert into cmv_basetable_n6 values +(1, 'kevin', 50.30, 2) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@cmv_basetable_n6 +POSTHOOK: query: insert into cmv_basetable_n6 values +(1, 'kevin', 50.30, 2) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@cmv_basetable_n6 +POSTHOOK: Lineage: cmv_basetable_n6.a SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_n6.b SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_n6.c SCRIPT [] +POSTHOOK: Lineage: cmv_basetable_n6.d SCRIPT [] +PREHOOK: query: insert into t3 values +(1, 'new rec', 60.30) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@t3 +POSTHOOK: query: insert into t3 values +(1, 'new rec', 60.30) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@t3 +POSTHOOK: Lineage: t3.a SCRIPT [] +POSTHOOK: Lineage: t3.b SCRIPT [] +POSTHOOK: Lineage: t3.c SCRIPT [] +PREHOOK: query: DELETE FROM cmv_basetable_2_n3 WHERE b = 'bonnie' +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_basetable_2_n3 +PREHOOK: Output: default@cmv_basetable_2_n3 +POSTHOOK: query: DELETE FROM cmv_basetable_2_n3 WHERE b = 'bonnie' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_basetable_2_n3 +POSTHOOK: Output: default@cmv_basetable_2_n3 +PREHOOK: query: EXPLAIN CBO +ALTER MATERIALIZED VIEW mat1 REBUILD +PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +PREHOOK: Input: default@cmv_basetable_2_n3 +PREHOOK: Input: default@cmv_basetable_n6 +PREHOOK: Input: default@mat1 +PREHOOK: Input: default@t3 +PREHOOK: Output: default@mat1 +PREHOOK: Output: default@mat1 +POSTHOOK: query: EXPLAIN CBO +ALTER MATERIALIZED VIEW mat1 REBUILD +POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +POSTHOOK: Input: default@cmv_basetable_2_n3 +POSTHOOK: Input: default@cmv_basetable_n6 +POSTHOOK: Input: default@mat1 +POSTHOOK: Input: default@t3 +POSTHOOK: Output: default@mat1 +POSTHOOK: Output: default@mat1 +CBO PLAN: +HiveProject(a0=[$4], c0=[$5], $f2=[CASE(IS NULL($2), $6, +($6, $2))]) + HiveFilter(condition=[OR(AND($3, OR(AND(IS NULL($2), =($6, 0)), AND(=(+($6, $2), 0), IS NOT NULL($2)))), AND(IS NULL($3), OR(AND(IS NULL($2), >($6, 0)), AND(>(+($6, $2), 0), IS NOT NULL($2)))), AND($3, OR(AND(IS NULL($2), >($6, 0)), AND(>(+($6, $2), 0), IS NOT NULL($2)))))]) + HiveJoin(condition=[AND(IS NOT DISTINCT FROM($0, $4), IS NOT DISTINCT FROM($1, $5))], joinType=[right], algorithm=[none], cost=[not available]) + HiveProject(a=[$0], c=[$1], _c2=[$2], $f3=[true]) + HiveTableScan(table=[[default, mat1]], table:alias=[default.mat1]) + HiveProject(a=[$0], c0=[$1], $f2=[$2]) + HiveAggregate(group=[{0, 1}], 
agg#0=[SUM($2)]) + HiveProject(a=[$9], c0=[$5], $f4=[CASE(OR($12, $7, $2), -1, 1)]) + HiveJoin(condition=[AND(=($9, $4), OR(>($10, 10:DECIMAL(2, 0)), >($5, 10:DECIMAL(2, 0))), OR(AND(NOT($12), NOT($7)), AND(NOT($13), NOT($8))), OR(AND(NOT($12), NOT($7), NOT($2)), AND(NOT($13), NOT($8), NOT($3))), OR(<(1, $6.writeid), <(1, $11.writeid), <(1, $1.writeid)))], joinType=[inner], algorithm=[none], cost=[not available]) + HiveJoin(condition=[=($0, $4)], joinType=[inner], algorithm=[none], cost=[not available]) + HiveProject(a=[$0], ROW__ID=[$5], _deleted=[AND($6, <(1, $5.writeid))], _inserted=[AND(<(1, $5.writeid), NOT($6))]) + HiveFilter(condition=[IS NOT NULL($0)]) + HiveTableScan(table=[[default, t3]], table:alias=[t3]) + HiveProject(a=[$0], c=[$2], ROW__ID=[$6], _deleted=[AND($7, <(1, $6.writeid))], _inserted=[AND(<(1, $6.writeid), NOT($7))]) + HiveFilter(condition=[IS NOT NULL($0)]) + HiveTableScan(table=[[default, cmv_basetable_2_n3]], table:alias=[cmv_basetable_2_n3]) + HiveProject(a=[$0], c=[$2], ROW__ID=[$6], _deleted=[AND($7, <(1, $6.writeid))], _inserted=[AND(<(1, $6.writeid), NOT($7))]) + HiveFilter(condition=[IS NOT NULL($0)]) + HiveTableScan(table=[[default, cmv_basetable_n6]], table:alias=[cmv_basetable_n6]) + +PREHOOK: query: EXPLAIN +ALTER MATERIALIZED VIEW mat1 REBUILD +PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +PREHOOK: Input: default@cmv_basetable_2_n3 +PREHOOK: Input: default@cmv_basetable_n6 +PREHOOK: Input: default@mat1 +PREHOOK: Input: default@t3 +PREHOOK: Output: default@mat1 +PREHOOK: Output: default@mat1 +POSTHOOK: query: EXPLAIN +ALTER MATERIALIZED VIEW mat1 REBUILD +POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +POSTHOOK: Input: default@cmv_basetable_2_n3 +POSTHOOK: Input: default@cmv_basetable_n6 +POSTHOOK: Input: default@mat1 +POSTHOOK: Input: default@t3 +POSTHOOK: Output: default@mat1 +POSTHOOK: Output: default@mat1 +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-5 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-5 + Stage-6 depends on stages: Stage-0 + Stage-10 depends on stages: Stage-6, Stage-7, Stage-8, Stage-9 + Stage-1 depends on stages: Stage-5 + Stage-7 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-5 + Stage-8 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-5 + Stage-9 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Tez +#### A masked pattern was here #### + Edges: + Reducer 10 <- Reducer 9 (SIMPLE_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 10 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) + Reducer 4 <- Reducer 2 (SIMPLE_EDGE) + Reducer 5 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) + Reducer 6 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) + Reducer 8 <- Map 11 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE) + Reducer 9 <- Map 12 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: default.mat1 + Statistics: Num rows: 3 Data size: 372 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), c (type: decimal(10,2)), _c2 (type: bigint), true (type: boolean), ROW__ID (type: struct) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 3 Data size: 612 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: decimal(10,2)) + null sort order: zz + sort order: ++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2)) + Statistics: Num rows: 3 Data size: 612 Basic stats: COMPLETE Column 
stats: COMPLETE + value expressions: _col2 (type: bigint), _col3 (type: boolean), _col4 (type: struct) + Execution mode: vectorized, llap + LLAP IO: may be used (ACID table) + Map 11 + Map Operator Tree: + TableScan + alias: cmv_basetable_2_n3 + filterExpr: a is not null (type: boolean) + properties: + acid.fetch.deleted.rows TRUE + Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: a is not null (type: boolean) + Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), c (type: decimal(10,2)), ROW__ID (type: struct), (ROW__IS__DELETED and (ROW__ID.writeid > 1L)) (type: boolean), ((ROW__ID.writeid > 1L) and (not ROW__IS__DELETED)) (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: decimal(10,2)), _col2 (type: struct), _col3 (type: boolean), _col4 (type: boolean) + Execution mode: vectorized, llap + LLAP IO: may be used (ACID table) + Map 12 + Map Operator Tree: + TableScan + alias: cmv_basetable_n6 + filterExpr: a is not null (type: boolean) + properties: + acid.fetch.deleted.rows TRUE + Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: a is not null (type: boolean) + Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), c (type: decimal(10,2)), ROW__ID (type: struct), (ROW__IS__DELETED and (ROW__ID.writeid > 1L)) (type: boolean), ((ROW__ID.writeid > 1L) and (not ROW__IS__DELETED)) (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 4 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 4 Data size: 800 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: decimal(10,2)), _col2 (type: struct), _col3 (type: boolean), _col4 (type: boolean) + Execution mode: vectorized, llap + LLAP IO: may be used (ACID table) + Map 7 + Map Operator Tree: + TableScan + alias: t3 + filterExpr: a is not null (type: boolean) + properties: + acid.fetch.deleted.rows TRUE + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: a is not null (type: boolean) + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), ROW__ID (type: struct), (ROW__IS__DELETED and (ROW__ID.writeid > 1L)) (type: boolean), ((ROW__ID.writeid > 1L) and (not ROW__IS__DELETED)) (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 4 Data size: 352 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 4 Data size: 352 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: struct), _col2 (type: boolean), 
_col3 (type: boolean) + Execution mode: vectorized, llap + LLAP IO: may be used (ACID table) + Reducer 10 + Execution mode: vectorized, llap + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0) + keys: KEY._col0 (type: int), KEY._col1 (type: decimal(10,2)) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: decimal(10,2)) + null sort order: zz + sort order: ++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2)) + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col2 (type: bigint) + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Merge Join Operator + condition map: + Right Outer Join 0 to 1 + keys: + 0 _col0 (type: int), _col1 (type: decimal(10,2)) + 1 _col0 (type: int), _col1 (type: decimal(10,2)) + nullSafes: [true, true] + outputColumnNames: _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 2 Data size: 424 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (_col3 and ((_col2 is null and (_col7 > 0L)) or (((_col7 + _col2) > 0) and _col2 is not null))) (type: boolean) + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col4 (type: struct) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: struct) + null sort order: z + sort order: + + Map-reduce partition columns: UDFToInteger(_col0) (type: int) + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (_col3 and ((_col2 is null and (_col7 = 0L)) or (((_col7 + _col2) = 0) and _col2 is not null))) (type: boolean) + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col4 (type: struct) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: struct) + null sort order: z + sort order: + + Map-reduce partition columns: UDFToInteger(_col0) (type: int) + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (_col3 and ((_col2 is null and (_col7 > 0L)) or (((_col7 + _col2) > 0) and _col2 is not null))) (type: boolean) + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col5 (type: int), _col6 (type: decimal(10,2)), if(_col2 is null, _col7, (_col7 + _col2)) (type: bigint) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: decimal(10,2)), _col2 (type: bigint) + outputColumnNames: a, c, _c2 + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + Group By 
Operator + aggregations: min(a), max(a), count(1), count(a), compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c), min(_c2), max(_c2), count(_c2), compute_bit_vector_hll(_c2) + minReductionHashAggr: 0.4 + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + Statistics: Num rows: 1 Data size: 712 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 712 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8 (type: binary), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: binary) + Filter Operator + predicate: (_col3 is null and ((_col2 is null and (_col7 > 0L)) or (((_col7 + _col2) > 0) and _col2 is not null))) (type: boolean) + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col5 (type: int), _col6 (type: decimal(10,2)), if(_col2 is null, _col7, (_col7 + _col2)) (type: bigint) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: decimal(10,2)), _col2 (type: bigint) + outputColumnNames: a, c, _c2 + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: min(a), max(a), count(1), count(a), compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c), min(_c2), max(_c2), count(_c2), compute_bit_vector_hll(_c2) + minReductionHashAggr: 0.4 + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + Statistics: Num rows: 1 Data size: 712 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 712 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8 (type: binary), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: binary) + Reducer 3 + Execution mode: vectorized, llap + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: struct) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: DELETE + Reducer 4 + Execution mode: vectorized, 
llap + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: struct) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: DELETE + Reducer 5 + Execution mode: vectorized, llap + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector_hll(VALUE._col4), min(VALUE._col5), max(VALUE._col6), count(VALUE._col7), compute_bit_vector_hll(VALUE._col8), min(VALUE._col9), max(VALUE._col10), count(VALUE._col11), compute_bit_vector_hll(VALUE._col12) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + Statistics: Num rows: 1 Data size: 712 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 'LONG' (type: string), UDFToLong(_col0) (type: bigint), UDFToLong(_col1) (type: bigint), (_col2 - _col3) (type: bigint), COALESCE(ndv_compute_bit_vector(_col4),0) (type: bigint), _col4 (type: binary), 'DECIMAL' (type: string), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), (_col2 - _col7) (type: bigint), COALESCE(ndv_compute_bit_vector(_col8),0) (type: bigint), _col8 (type: binary), 'LONG' (type: string), _col9 (type: bigint), _col10 (type: bigint), (_col2 - _col11) (type: bigint), COALESCE(ndv_compute_bit_vector(_col12),0) (type: bigint), _col12 (type: binary) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17 + Statistics: Num rows: 1 Data size: 1003 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 1003 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 6 + Execution mode: vectorized, llap + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector_hll(VALUE._col4), min(VALUE._col5), max(VALUE._col6), count(VALUE._col7), compute_bit_vector_hll(VALUE._col8), min(VALUE._col9), max(VALUE._col10), count(VALUE._col11), compute_bit_vector_hll(VALUE._col12) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + Statistics: Num rows: 1 Data size: 712 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 'LONG' (type: string), UDFToLong(_col0) (type: bigint), UDFToLong(_col1) (type: bigint), (_col2 - _col3) (type: bigint), COALESCE(ndv_compute_bit_vector(_col4),0) (type: bigint), _col4 (type: binary), 'DECIMAL' (type: string), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), (_col2 - _col7) (type: bigint), COALESCE(ndv_compute_bit_vector(_col8),0) (type: bigint), _col8 (type: binary), 'LONG' (type: string), _col9 (type: bigint), _col10 (type: bigint), (_col2 - _col11) (type: bigint), 
COALESCE(ndv_compute_bit_vector(_col12),0) (type: bigint), _col12 (type: binary) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17 + Statistics: Num rows: 1 Data size: 1003 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 1003 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 8 + Execution mode: llap + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 + Statistics: Num rows: 4 Data size: 1136 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col4 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col4 (type: int) + Statistics: Num rows: 4 Data size: 1136 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: struct), _col2 (type: boolean), _col3 (type: boolean), _col5 (type: decimal(10,2)), _col6 (type: struct), _col7 (type: boolean), _col8 (type: boolean) + Reducer 9 + Execution mode: llap + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col4 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col1, _col2, _col3, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 + residual filter predicates: {((_col10 > 10) or (_col5 > 10))} {(((not _col12) and (not _col7)) or ((not _col13) and (not _col8)))} {(((not _col12) and (not _col7) and (not _col2)) or ((not _col13) and (not _col8) and (not _col3)))} {((_col6.writeid > 1L) or (_col11.writeid > 1L) or (_col1.writeid > 1L))} + Statistics: Num rows: 1 Data size: 480 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col9 (type: int), _col5 (type: decimal(10,2)), if((_col12 or _col7 or _col2), -1, 1) (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 480 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: sum(_col2) + keys: _col0 (type: int), _col1 (type: decimal(10,2)) + minReductionHashAggr: 0.4 + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: decimal(10,2)) + null sort order: zz + sort order: ++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2)) + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col2 (type: bigint) + + Stage: Stage-5 + Dependency Collection + + Stage: Stage-0 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: DELETE + + Stage: Stage-6 + Stats Work + Basic Stats Work: + + Stage: Stage-10 + Materialized View Update + name: default.mat1 + update creation metadata: true + + Stage: Stage-1 + Move Operator + tables: + replace: false + table: + input format: 
org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: DELETE + + Stage: Stage-7 + Stats Work + Basic Stats Work: + + Stage: Stage-2 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: INSERT + + Stage: Stage-8 + Stats Work + Basic Stats Work: + + Stage: Stage-3 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: INSERT + + Stage: Stage-9 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: a, c, _c2 + Column Types: int, decimal(10,2), bigint + Table: default.mat1 + +PREHOOK: query: ALTER MATERIALIZED VIEW mat1 REBUILD +PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +PREHOOK: Input: default@cmv_basetable_2_n3 +PREHOOK: Input: default@cmv_basetable_n6 +PREHOOK: Input: default@mat1 +PREHOOK: Input: default@t3 +PREHOOK: Output: default@mat1 +PREHOOK: Output: default@mat1 +POSTHOOK: query: ALTER MATERIALIZED VIEW mat1 REBUILD +POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +POSTHOOK: Input: default@cmv_basetable_2_n3 +POSTHOOK: Input: default@cmv_basetable_n6 +POSTHOOK: Input: default@mat1 +POSTHOOK: Input: default@t3 +POSTHOOK: Output: default@mat1 +POSTHOOK: Output: default@mat1 +POSTHOOK: Lineage: mat1._c2 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c2, type:bigint, comment:null), (cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:ROW__ID, type:struct, comment:), (cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:ROW__ID, type:struct, comment:), (t3)t3.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t3)t3.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: mat1._c2 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c2, type:bigint, comment:null), (cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:ROW__ID, type:struct, comment:), (cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:ROW__ID, type:struct, comment:), (t3)t3.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t3)t3.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: mat1.a SIMPLE [(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int, comment:null), ] +POSTHOOK: Lineage: mat1.a SIMPLE [(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int, comment:null), ] +POSTHOOK: Lineage: mat1.c SIMPLE [(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2), comment:null), ] +POSTHOOK: Lineage: mat1.c SIMPLE [(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2), comment:null), ] +PREHOOK: query: select * from mat1 +PREHOOK: type: QUERY +PREHOOK: Input: default@mat1 +#### A masked pattern was here #### +POSTHOOK: query: select * from mat1 
+POSTHOOK: type: QUERY +POSTHOOK: Input: default@mat1 +#### A masked pattern was here #### +2 130.30 1 +1 30.30 9 +PREHOOK: query: delete from cmv_basetable_n6 where b = 'kevin' +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_basetable_n6 +PREHOOK: Output: default@cmv_basetable_n6 +POSTHOOK: query: delete from cmv_basetable_n6 where b = 'kevin' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_basetable_n6 +POSTHOOK: Output: default@cmv_basetable_n6 +PREHOOK: query: EXPLAIN CBO +ALTER MATERIALIZED VIEW mat1 REBUILD +PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +PREHOOK: Input: default@cmv_basetable_2_n3 +PREHOOK: Input: default@cmv_basetable_n6 +PREHOOK: Input: default@mat1 +PREHOOK: Input: default@t3 +PREHOOK: Output: default@mat1 +PREHOOK: Output: default@mat1 +POSTHOOK: query: EXPLAIN CBO +ALTER MATERIALIZED VIEW mat1 REBUILD +POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +POSTHOOK: Input: default@cmv_basetable_2_n3 +POSTHOOK: Input: default@cmv_basetable_n6 +POSTHOOK: Input: default@mat1 +POSTHOOK: Input: default@t3 +POSTHOOK: Output: default@mat1 +POSTHOOK: Output: default@mat1 +CBO PLAN: +HiveProject(a0=[$4], c0=[$5], $f2=[CASE(IS NULL($2), $6, +($6, $2))]) + HiveFilter(condition=[OR(AND($3, OR(AND(IS NULL($2), =($6, 0)), AND(=(+($6, $2), 0), IS NOT NULL($2)))), AND(IS NULL($3), OR(AND(IS NULL($2), >($6, 0)), AND(>(+($6, $2), 0), IS NOT NULL($2)))), AND($3, OR(AND(IS NULL($2), >($6, 0)), AND(>(+($6, $2), 0), IS NOT NULL($2)))))]) + HiveJoin(condition=[AND(IS NOT DISTINCT FROM($0, $4), IS NOT DISTINCT FROM($1, $5))], joinType=[right], algorithm=[none], cost=[not available]) + HiveProject(a=[$0], c=[$1], _c2=[$2], $f3=[true]) + HiveTableScan(table=[[default, mat1]], table:alias=[default.mat1]) + HiveProject(a=[$0], c0=[$1], $f2=[$2]) + HiveAggregate(group=[{0, 1}], agg#0=[SUM($2)]) + HiveProject(a=[$3], c0=[$2], $f4=[CASE($5, -1, 1)]) + HiveJoin(condition=[=($0, $1)], joinType=[inner], algorithm=[none], cost=[not available]) + HiveProject(a=[$0]) + HiveFilter(condition=[IS NOT NULL($0)]) + HiveTableScan(table=[[default, t3]], table:alias=[t3]) + HiveJoin(condition=[AND(=($2, $0), OR(>($3, 10:DECIMAL(2, 0)), >($1, 10:DECIMAL(2, 0))))], joinType=[inner], algorithm=[none], cost=[not available]) + HiveProject(a=[$0], c=[$2]) + HiveFilter(condition=[IS NOT NULL($0)]) + HiveTableScan(table=[[default, cmv_basetable_2_n3]], table:alias=[cmv_basetable_2_n3]) + HiveProject(a=[$0], c=[$2], _deleted=[AND($7, <(2, $6.writeid))]) + HiveFilter(condition=[AND(<(2, $6.writeid), OR(NOT($7), >=(2, $6.writeid), $7), IS NOT NULL($0))]) + HiveTableScan(table=[[default, cmv_basetable_n6]], table:alias=[cmv_basetable_n6]) + +PREHOOK: query: EXPLAIN +ALTER MATERIALIZED VIEW mat1 REBUILD +PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +PREHOOK: Input: default@cmv_basetable_2_n3 +PREHOOK: Input: default@cmv_basetable_n6 +PREHOOK: Input: default@mat1 +PREHOOK: Input: default@t3 +PREHOOK: Output: default@mat1 +PREHOOK: Output: default@mat1 +POSTHOOK: query: EXPLAIN +ALTER MATERIALIZED VIEW mat1 REBUILD +POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +POSTHOOK: Input: default@cmv_basetable_2_n3 +POSTHOOK: Input: default@cmv_basetable_n6 +POSTHOOK: Input: default@mat1 +POSTHOOK: Input: default@t3 +POSTHOOK: Output: default@mat1 +POSTHOOK: Output: default@mat1 +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-5 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-5 + Stage-6 depends on stages: Stage-0 + Stage-10 depends on stages: Stage-6, Stage-7, Stage-8, Stage-9 + Stage-1 depends on 
stages: Stage-5 + Stage-7 depends on stages: Stage-1 + Stage-2 depends on stages: Stage-5 + Stage-8 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-5 + Stage-9 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Tez +#### A masked pattern was here #### + Edges: + Reducer 10 <- Reducer 9 (SIMPLE_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 10 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (SIMPLE_EDGE) + Reducer 4 <- Reducer 2 (SIMPLE_EDGE) + Reducer 5 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) + Reducer 6 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) + Reducer 8 <- Map 11 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE) + Reducer 9 <- Map 12 (SIMPLE_EDGE), Reducer 8 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: default.mat1 + Statistics: Num rows: 2 Data size: 248 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), c (type: decimal(10,2)), _c2 (type: bigint), true (type: boolean), ROW__ID (type: struct) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 2 Data size: 408 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: decimal(10,2)) + null sort order: zz + sort order: ++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2)) + Statistics: Num rows: 2 Data size: 408 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col2 (type: bigint), _col3 (type: boolean), _col4 (type: struct) + Execution mode: vectorized, llap + LLAP IO: may be used (ACID table) + Map 11 + Map Operator Tree: + TableScan + alias: cmv_basetable_n6 + filterExpr: ((ROW__ID.writeid > 2L) and ((not ROW__IS__DELETED) or (ROW__ID.writeid <= 2L) or ROW__IS__DELETED) and a is not null) (type: boolean) + properties: + acid.fetch.deleted.rows TRUE + Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: ((ROW__ID.writeid > 2L) and ((not ROW__IS__DELETED) or (ROW__ID.writeid <= 2L) or ROW__IS__DELETED) and a is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), c (type: decimal(10,2)), (ROW__IS__DELETED and (ROW__ID.writeid > 2L)) (type: boolean) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: decimal(10,2)), _col2 (type: boolean) + Execution mode: vectorized, llap + LLAP IO: may be used (ACID table) + Map 12 + Map Operator Tree: + TableScan + alias: t3 + filterExpr: a is not null (type: boolean) + properties: + acid.fetch.deleted.rows TRUE + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: a is not null (type: boolean) + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 4 Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 4 
Data size: 16 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized, llap + LLAP IO: may be used (ACID table) + Map 7 + Map Operator Tree: + TableScan + alias: cmv_basetable_2_n3 + filterExpr: a is not null (type: boolean) + properties: + acid.fetch.deleted.rows TRUE + Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: a is not null (type: boolean) + Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: a (type: int), c (type: decimal(10,2)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 2 Data size: 232 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: decimal(10,2)) + Execution mode: vectorized, llap + LLAP IO: may be used (ACID table) + Reducer 10 + Execution mode: vectorized, llap + Reduce Operator Tree: + Group By Operator + aggregations: sum(VALUE._col0) + keys: KEY._col0 (type: int), KEY._col1 (type: decimal(10,2)) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: decimal(10,2)) + null sort order: zz + sort order: ++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2)) + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col2 (type: bigint) + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Merge Join Operator + condition map: + Right Outer Join 0 to 1 + keys: + 0 _col0 (type: int), _col1 (type: decimal(10,2)) + 1 _col0 (type: int), _col1 (type: decimal(10,2)) + nullSafes: [true, true] + outputColumnNames: _col2, _col3, _col4, _col5, _col6, _col7 + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (_col3 and ((_col2 is null and (_col7 > 0L)) or (((_col7 + _col2) > 0) and _col2 is not null))) (type: boolean) + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col4 (type: struct) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: struct) + null sort order: z + sort order: + + Map-reduce partition columns: UDFToInteger(_col0) (type: int) + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (_col3 and ((_col2 is null and (_col7 = 0L)) or (((_col7 + _col2) = 0) and _col2 is not null))) (type: boolean) + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col4 (type: struct) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: struct) + null sort order: z + sort order: + + Map-reduce partition columns: UDFToInteger(_col0) (type: int) + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: (_col3 and ((_col2 is null and (_col7 > 0L)) or (((_col7 + _col2) > 0) and _col2 
is not null))) (type: boolean) + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col5 (type: int), _col6 (type: decimal(10,2)), if(_col2 is null, _col7, (_col7 + _col2)) (type: bigint) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: decimal(10,2)), _col2 (type: bigint) + outputColumnNames: a, c, _c2 + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: min(a), max(a), count(1), count(a), compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c), min(_c2), max(_c2), count(_c2), compute_bit_vector_hll(_c2) + minReductionHashAggr: 0.4 + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + Statistics: Num rows: 1 Data size: 712 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 712 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8 (type: binary), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: binary) + Filter Operator + predicate: (_col3 is null and ((_col2 is null and (_col7 > 0L)) or (((_col7 + _col2) > 0) and _col2 is not null))) (type: boolean) + Statistics: Num rows: 1 Data size: 212 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col5 (type: int), _col6 (type: decimal(10,2)), if(_col2 is null, _col7, (_col7 + _col2)) (type: bigint) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: INSERT + Select Operator + expressions: _col0 (type: int), _col1 (type: decimal(10,2)), _col2 (type: bigint) + outputColumnNames: a, c, _c2 + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: min(a), max(a), count(1), count(a), compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c), min(_c2), max(_c2), count(_c2), compute_bit_vector_hll(_c2) + minReductionHashAggr: 0.4 + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + Statistics: Num rows: 1 Data size: 712 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 712 Basic stats: COMPLETE Column stats: COMPLETE 
+ value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8 (type: binary), _col9 (type: bigint), _col10 (type: bigint), _col11 (type: bigint), _col12 (type: binary) + Reducer 3 + Execution mode: vectorized, llap + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: struct) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: DELETE + Reducer 4 + Execution mode: vectorized, llap + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: struct) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: DELETE + Reducer 5 + Execution mode: vectorized, llap + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3), compute_bit_vector_hll(VALUE._col4), min(VALUE._col5), max(VALUE._col6), count(VALUE._col7), compute_bit_vector_hll(VALUE._col8), min(VALUE._col9), max(VALUE._col10), count(VALUE._col11), compute_bit_vector_hll(VALUE._col12) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + Statistics: Num rows: 1 Data size: 712 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 'LONG' (type: string), UDFToLong(_col0) (type: bigint), UDFToLong(_col1) (type: bigint), (_col2 - _col3) (type: bigint), COALESCE(ndv_compute_bit_vector(_col4),0) (type: bigint), _col4 (type: binary), 'DECIMAL' (type: string), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), (_col2 - _col7) (type: bigint), COALESCE(ndv_compute_bit_vector(_col8),0) (type: bigint), _col8 (type: binary), 'LONG' (type: string), _col9 (type: bigint), _col10 (type: bigint), (_col2 - _col11) (type: bigint), COALESCE(ndv_compute_bit_vector(_col12),0) (type: bigint), _col12 (type: binary) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17 + Statistics: Num rows: 1 Data size: 1003 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 1003 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 6 + Execution mode: vectorized, llap + Reduce Operator Tree: + Group By Operator + aggregations: min(VALUE._col0), max(VALUE._col1), count(VALUE._col2), count(VALUE._col3), 
compute_bit_vector_hll(VALUE._col4), min(VALUE._col5), max(VALUE._col6), count(VALUE._col7), compute_bit_vector_hll(VALUE._col8), min(VALUE._col9), max(VALUE._col10), count(VALUE._col11), compute_bit_vector_hll(VALUE._col12) + mode: mergepartial + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + Statistics: Num rows: 1 Data size: 712 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: 'LONG' (type: string), UDFToLong(_col0) (type: bigint), UDFToLong(_col1) (type: bigint), (_col2 - _col3) (type: bigint), COALESCE(ndv_compute_bit_vector(_col4),0) (type: bigint), _col4 (type: binary), 'DECIMAL' (type: string), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), (_col2 - _col7) (type: bigint), COALESCE(ndv_compute_bit_vector(_col8),0) (type: bigint), _col8 (type: binary), 'LONG' (type: string), _col9 (type: bigint), _col10 (type: bigint), (_col2 - _col11) (type: bigint), COALESCE(ndv_compute_bit_vector(_col12),0) (type: bigint), _col12 (type: binary) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17 + Statistics: Num rows: 1 Data size: 1003 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 1003 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 8 + Execution mode: llap + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + residual filter predicates: {((_col3 > 10) or (_col1 > 10))} + Statistics: Num rows: 1 Data size: 236 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 1 Data size: 236 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: decimal(10,2)), _col2 (type: int), _col4 (type: boolean) + Reducer 9 + Execution mode: llap + Reduce Operator Tree: + Merge Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 _col0 (type: int) + 1 _col0 (type: int) + outputColumnNames: _col1, _col2, _col4 + Statistics: Num rows: 2 Data size: 240 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col2 (type: int), _col1 (type: decimal(10,2)), if(_col4, -1, 1) (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 2 Data size: 240 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: sum(_col2) + keys: _col0 (type: int), _col1 (type: decimal(10,2)) + minReductionHashAggr: 0.5 + mode: hash + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: decimal(10,2)) + null sort order: zz + sort order: ++ + Map-reduce partition columns: _col0 (type: int), _col1 (type: decimal(10,2)) + Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col2 (type: bigint) + + Stage: Stage-5 + Dependency Collection + + Stage: Stage-0 + Move 
Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: DELETE + + Stage: Stage-6 + Stats Work + Basic Stats Work: + + Stage: Stage-10 + Materialized View Update + name: default.mat1 + update creation metadata: true + + Stage: Stage-1 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: DELETE + + Stage: Stage-7 + Stats Work + Basic Stats Work: + + Stage: Stage-2 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: INSERT + + Stage: Stage-8 + Stats Work + Basic Stats Work: + + Stage: Stage-3 + Move Operator + tables: + replace: false + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.mat1 + Write Type: INSERT + + Stage: Stage-9 + Stats Work + Basic Stats Work: + Column Stats Desc: + Columns: a, c, _c2 + Column Types: int, decimal(10,2), bigint + Table: default.mat1 + +PREHOOK: query: ALTER MATERIALIZED VIEW mat1 REBUILD +PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +PREHOOK: Input: default@cmv_basetable_2_n3 +PREHOOK: Input: default@cmv_basetable_n6 +PREHOOK: Input: default@mat1 +PREHOOK: Input: default@t3 +PREHOOK: Output: default@mat1 +PREHOOK: Output: default@mat1 +POSTHOOK: query: ALTER MATERIALIZED VIEW mat1 REBUILD +POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD +POSTHOOK: Input: default@cmv_basetable_2_n3 +POSTHOOK: Input: default@cmv_basetable_n6 +POSTHOOK: Input: default@mat1 +POSTHOOK: Input: default@t3 +POSTHOOK: Output: default@mat1 +POSTHOOK: Output: default@mat1 +POSTHOOK: Lineage: mat1._c2 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c2, type:bigint, comment:null), (cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: mat1._c2 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c2, type:bigint, comment:null), (cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: mat1.a SIMPLE [(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int, comment:null), ] +POSTHOOK: Lineage: mat1.a SIMPLE [(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int, comment:null), ] +POSTHOOK: Lineage: mat1.c SIMPLE [(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2), comment:null), ] +POSTHOOK: Lineage: mat1.c SIMPLE [(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2), comment:null), ] +PREHOOK: query: select * from mat1 +PREHOOK: type: QUERY +PREHOOK: Input: default@mat1 +#### A masked pattern was here #### +POSTHOOK: query: select * from mat1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@mat1 +#### A masked pattern was here #### +2 130.30 1 +1 30.30 6 
+PREHOOK: query: drop materialized view mat1 +PREHOOK: type: DROP_MATERIALIZED_VIEW +PREHOOK: Input: default@mat1 +PREHOOK: Output: default@mat1 +POSTHOOK: query: drop materialized view mat1 +POSTHOOK: type: DROP_MATERIALIZED_VIEW +POSTHOOK: Input: default@mat1 +POSTHOOK: Output: default@mat1 +PREHOOK: query: SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c, count(*) +FROM cmv_basetable_n6 +JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +JOIN t3 ON (t3.a = cmv_basetable_2_n3.a) +WHERE cmv_basetable_n6.c > 10 OR cmv_basetable_2_n3.c > 10 +group by cmv_basetable_n6.a, cmv_basetable_2_n3.c +PREHOOK: type: QUERY +PREHOOK: Input: default@cmv_basetable_2_n3 +PREHOOK: Input: default@cmv_basetable_n6 +PREHOOK: Input: default@t3 +#### A masked pattern was here #### +POSTHOOK: query: SELECT cmv_basetable_n6.a, cmv_basetable_2_n3.c, count(*) +FROM cmv_basetable_n6 +JOIN cmv_basetable_2_n3 ON (cmv_basetable_n6.a = cmv_basetable_2_n3.a) +JOIN t3 ON (t3.a = cmv_basetable_2_n3.a) +WHERE cmv_basetable_n6.c > 10 OR cmv_basetable_2_n3.c > 10 +group by cmv_basetable_n6.a, cmv_basetable_2_n3.c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@cmv_basetable_2_n3 +POSTHOOK: Input: default@cmv_basetable_n6 +POSTHOOK: Input: default@t3 +#### A masked pattern was here #### +2 130.30 1 +1 30.30 6 diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_8.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_8.q.out index 6fa072efb478..3f7007a1501f 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_8.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_8.q.out @@ -7,6 +7,8 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@cmv_basetable_n6 PREHOOK: query: insert into cmv_basetable_n6 values + (1, 'alfred', 10.30, 2), + (1, 'alfred', 10.30, 2), (1, 'alfred', 10.30, 2), (2, 'bob', 3.14, 3), (2, 'bonnie', 172342.2, 3), @@ -16,6 +18,8 @@ PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table PREHOOK: Output: default@cmv_basetable_n6 POSTHOOK: query: insert into cmv_basetable_n6 values + (1, 'alfred', 10.30, 2), + (1, 'alfred', 10.30, 2), (1, 'alfred', 10.30, 2), (2, 'bob', 3.14, 3), (2, 'bonnie', 172342.2, 3), @@ -37,12 +41,14 @@ POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@cmv_basetable_2_n3 PREHOOK: query: insert into cmv_basetable_2_n3 values + (1, 'alfred', 10.30, 2), (1, 'alfred', 10.30, 2), (3, 'calvin', 978.76, 3) PREHOOK: type: QUERY PREHOOK: Input: _dummy_database@_dummy_table PREHOOK: Output: default@cmv_basetable_2_n3 POSTHOOK: query: insert into cmv_basetable_2_n3 values + (1, 'alfred', 10.30, 2), (1, 'alfred', 10.30, 2), (3, 'calvin', 978.76, 3) POSTHOOK: type: QUERY @@ -129,6 +135,11 @@ POSTHOOK: Input: default@cmv_basetable_2_n3 POSTHOOK: Input: default@cmv_basetable_n6 #### A masked pattern was here #### 1 +1 +1 +1 +1 +1 3 3 3 @@ -166,20 +177,20 @@ STAGE PLANS: TableScan alias: cmv_basetable_n6 filterExpr: a is not null (type: boolean) - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: a is not null (type: boolean) - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Select Operator expressions: a (type: int) 
outputColumnNames: _col0 - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Map 4 @@ -189,7 +200,7 @@ STAGE PLANS: filterExpr: ((ROW__ID.writeid > 1L) and (c > 10) and a is not null) (type: boolean) properties: insertonly.fetch.bucketid TRUE - Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((ROW__ID.writeid > 1L) and (c > 10) and a is not null) (type: boolean) Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE @@ -344,6 +355,11 @@ POSTHOOK: Input: default@cmv_basetable_n6 POSTHOOK: Input: default@cmv_mat_view_n6 #### A masked pattern was here #### 1 +1 +1 +1 +1 +1 3 3 3 @@ -367,101 +383,70 @@ ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD PREHOOK: Input: default@cmv_basetable_2_n3 PREHOOK: Input: default@cmv_basetable_n6 -PREHOOK: Input: default@cmv_mat_view_n6 -PREHOOK: Output: default@cmv_mat_view_n6 PREHOOK: Output: default@cmv_mat_view_n6 POSTHOOK: query: EXPLAIN ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD POSTHOOK: Input: default@cmv_basetable_2_n3 POSTHOOK: Input: default@cmv_basetable_n6 -POSTHOOK: Input: default@cmv_mat_view_n6 -POSTHOOK: Output: default@cmv_mat_view_n6 POSTHOOK: Output: default@cmv_mat_view_n6 STAGE DEPENDENCIES: - Stage-2 is a root stage - Stage-3 depends on stages: Stage-2 - Stage-0 depends on stages: Stage-3 - Stage-4 depends on stages: Stage-0 - Stage-6 depends on stages: Stage-4, Stage-5 - Stage-1 depends on stages: Stage-3 - Stage-5 depends on stages: Stage-1 + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + Stage-3 depends on stages: Stage-0 + Stage-4 depends on stages: Stage-3 STAGE PLANS: - Stage: Stage-2 + Stage: Stage-1 Tez #### A masked pattern was here #### Edges: - Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 6 (SIMPLE_EDGE) - Reducer 3 <- Reducer 2 (SIMPLE_EDGE) - Reducer 4 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) - Reducer 6 <- Map 5 (SIMPLE_EDGE), Map 7 (SIMPLE_EDGE) + Reducer 2 <- Map 1 (SIMPLE_EDGE), Map 4 (SIMPLE_EDGE) + Reducer 3 <- Reducer 2 (CUSTOM_SIMPLE_EDGE) #### A masked pattern was here #### Vertices: Map 1 - Map Operator Tree: - TableScan - alias: default.cmv_mat_view_n6 - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: a (type: int), ROW__ID (type: struct) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 5 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 5 Data size: 400 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: struct) - Execution mode: vectorized, llap - LLAP IO: may be used (ACID table) - Map 5 Map Operator Tree: TableScan alias: 
cmv_basetable_n6 - filterExpr: ((ROW__ID.writeid > 1L) and a is not null) (type: boolean) - properties: - acid.fetch.deleted.rows TRUE - Statistics: Num rows: 5 Data size: 20 Basic stats: COMPLETE Column stats: COMPLETE + filterExpr: a is not null (type: boolean) + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((ROW__ID.writeid > 1L) and a is not null) (type: boolean) - Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + predicate: a is not null (type: boolean) + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: a (type: int), ROW__IS__DELETED (type: boolean) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + expressions: a (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: boolean) + Statistics: Num rows: 7 Data size: 28 Basic stats: COMPLETE Column stats: COMPLETE Execution mode: vectorized, llap LLAP IO: may be used (ACID table) - Map 7 + Map 4 Map Operator Tree: TableScan alias: cmv_basetable_2_n3 filterExpr: ((c > 10) and a is not null) (type: boolean) - properties: - acid.fetch.deleted.rows TRUE - Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: ((c > 10) and a is not null) (type: boolean) - Statistics: Num rows: 3 Data size: 348 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: a (type: int), c (type: decimal(10,2)), ROW__IS__DELETED (type: boolean) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 3 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE + expressions: a (type: int), c (type: decimal(10,2)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: int) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 3 Data size: 360 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: decimal(10,2)), _col2 (type: boolean) + Statistics: Num rows: 4 Data size: 464 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: decimal(10,2)) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Reducer 2 @@ -469,74 +454,41 @@ STAGE PLANS: Reduce Operator Tree: Merge Join Operator condition map: - Right Outer Join 0 to 1 + Inner Join 0 to 1 keys: 0 _col0 (type: int) 1 _col0 (type: int) - nullSafes: [true] - outputColumnNames: _col1, _col2, _col3, _col4 - Statistics: Num rows: 2 Data size: 392 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: _col4 (type: boolean) - Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col1 (type: struct) - outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE - 
Reduce Output Operator - key expressions: _col0 (type: struct) - null sort order: z - sort order: + - Map-reduce partition columns: UDFToInteger(_col0) (type: int) - Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE - Filter Operator - predicate: (not _col4) (type: boolean) - Statistics: Num rows: 1 Data size: 196 Basic stats: COMPLETE Column stats: COMPLETE + outputColumnNames: _col0, _col2 + Statistics: Num rows: 9 Data size: 1044 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: _col0 (type: int), _col2 (type: decimal(10,2)) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 9 Data size: 1044 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 9 Data size: 1044 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.cmv_mat_view_n6 + Write Type: INSERT Select Operator - expressions: _col2 (type: int), _col3 (type: decimal(10,2)) - outputColumnNames: _col0, _col1 - Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.cmv_mat_view_n6 - Write Type: INSERT - Select Operator - expressions: _col0 (type: int), _col1 (type: decimal(10,2)) - outputColumnNames: a, c - Statistics: Num rows: 1 Data size: 116 Basic stats: COMPLETE Column stats: COMPLETE - Group By Operator - aggregations: min(a), max(a), count(1), count(a), compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c) - minReductionHashAggr: 0.4 - mode: hash - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 + expressions: _col0 (type: int), _col1 (type: decimal(10,2)) + outputColumnNames: a, c + Statistics: Num rows: 9 Data size: 1044 Basic stats: COMPLETE Column stats: COMPLETE + Group By Operator + aggregations: min(a), max(a), count(1), count(a), compute_bit_vector_hll(a), min(c), max(c), count(c), compute_bit_vector_hll(c) + minReductionHashAggr: 0.8888889 + mode: hash + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 + Statistics: Num rows: 1 Data size: 544 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: Statistics: Num rows: 1 Data size: 544 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - null sort order: - sort order: - Statistics: Num rows: 1 Data size: 544 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8 (type: binary) + value expressions: _col0 (type: int), _col1 (type: int), _col2 (type: bigint), _col3 (type: bigint), _col4 (type: binary), _col5 (type: decimal(10,2)), _col6 (type: decimal(10,2)), _col7 (type: bigint), _col8 (type: binary) Reducer 3 - Execution mode: vectorized, llap - Reduce Operator Tree: - Select Operator - expressions: KEY.reducesinkkey0 (type: struct) - 
outputColumnNames: _col0 - Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE - File Output Operator - compressed: false - Statistics: Num rows: 1 Data size: 76 Basic stats: COMPLETE Column stats: COMPLETE - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.cmv_mat_view_n6 - Write Type: DELETE - Reducer 4 Execution mode: vectorized, llap Reduce Operator Tree: Group By Operator @@ -555,53 +507,11 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe - Reducer 6 - Execution mode: llap - Reduce Operator Tree: - Merge Join Operator - condition map: - Inner Join 0 to 1 - keys: - 0 _col0 (type: int) - 1 _col0 (type: int) - outputColumnNames: _col0, _col1, _col3, _col4 - Statistics: Num rows: 1 Data size: 124 Basic stats: COMPLETE Column stats: COMPLETE - Select Operator - expressions: _col0 (type: int), _col3 (type: decimal(10,2)), (_col1 or _col4) (type: boolean) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE - Reduce Output Operator - key expressions: _col0 (type: int) - null sort order: z - sort order: + - Map-reduce partition columns: _col0 (type: int) - Statistics: Num rows: 1 Data size: 120 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: decimal(10,2)), _col2 (type: boolean) - Stage: Stage-3 + Stage: Stage-2 Dependency Collection Stage: Stage-0 - Move Operator - tables: - replace: false - table: - input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat - output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat - serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde - name: default.cmv_mat_view_n6 - Write Type: DELETE - - Stage: Stage-4 - Stats Work - Basic Stats Work: - - Stage: Stage-6 - Materialized View Update - name: default.cmv_mat_view_n6 - update creation metadata: true - - Stage: Stage-1 Move Operator tables: replace: false @@ -612,7 +522,7 @@ STAGE PLANS: name: default.cmv_mat_view_n6 Write Type: INSERT - Stage: Stage-5 + Stage: Stage-3 Stats Work Basic Stats Work: Column Stats Desc: @@ -620,19 +530,20 @@ STAGE PLANS: Column Types: int, decimal(10,2) Table: default.cmv_mat_view_n6 + Stage: Stage-4 + Materialized View Update + name: default.cmv_mat_view_n6 + update creation metadata: true + PREHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD PREHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD PREHOOK: Input: default@cmv_basetable_2_n3 PREHOOK: Input: default@cmv_basetable_n6 -PREHOOK: Input: default@cmv_mat_view_n6 -PREHOOK: Output: default@cmv_mat_view_n6 PREHOOK: Output: default@cmv_mat_view_n6 POSTHOOK: query: ALTER MATERIALIZED VIEW cmv_mat_view_n6 REBUILD POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REBUILD POSTHOOK: Input: default@cmv_basetable_2_n3 POSTHOOK: Input: default@cmv_basetable_n6 -POSTHOOK: Input: default@cmv_mat_view_n6 -POSTHOOK: Output: default@cmv_mat_view_n6 POSTHOOK: Output: default@cmv_mat_view_n6 POSTHOOK: Lineage: cmv_mat_view_n6.a SIMPLE [(cmv_basetable_n6)cmv_basetable_n6.FieldSchema(name:a, type:int, comment:null), ] POSTHOOK: Lineage: cmv_mat_view_n6.c SIMPLE [(cmv_basetable_2_n3)cmv_basetable_2_n3.FieldSchema(name:c, type:decimal(10,2), comment:null), ] diff --git 
a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_9.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_9.q.out index 69e1a97dd6e5..69fc069a4991 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_9.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_create_rewrite_9.q.out @@ -251,12 +251,12 @@ HiveProject(a0=[$4], $f1=[CASE(IS NULL($1), $5, IS NULL($5), $1, +($5, $1))], $f HiveTableScan(table=[[default, mat1]], table:alias=[default.mat1]) HiveProject(a=[$0], $f1=[$1], $f2=[$2]) HiveAggregate(group=[{0}], agg#0=[SUM($1)], agg#1=[SUM($2)]) - HiveProject(a=[$0], $f3=[CASE(OR($2, $5), *(-1, $1), $1)], $f4=[CASE(OR($2, $5), -1, 1)]) - HiveJoin(condition=[AND(=($0, $4), OR($3, $6))], joinType=[inner], algorithm=[none], cost=[not available]) - HiveProject(a=[$0], b=[$1], ROW__IS__DELETED=[$6], <=[<(3, $5.writeid)]) + HiveProject(a=[$0], $f4=[CASE(OR($3, $7), *(-1, $1), $1)], $f5=[CASE(OR($3, $7), -1, 1)]) + HiveJoin(condition=[AND(=($0, $5), OR(AND(NOT($3), NOT($7)), AND(NOT($4), NOT($8))), OR(<(3, $2.writeid), <(3, $6.writeid)))], joinType=[inner], algorithm=[none], cost=[not available]) + HiveProject(a=[$0], b=[$1], ROW__ID=[$5], _deleted=[AND($6, <(3, $5.writeid))], _inserted=[AND(<(3, $5.writeid), NOT($6))]) HiveFilter(condition=[IS NOT NULL($0)]) HiveTableScan(table=[[default, t1]], table:alias=[t1]) - HiveProject(a=[$0], ROW__IS__DELETED=[$5], <=[<(3, $4.writeid)]) + HiveProject(a=[$0], ROW__ID=[$4], _deleted=[AND($5, <(3, $4.writeid))], _inserted=[AND(<(3, $4.writeid), NOT($5))]) HiveFilter(condition=[IS NOT NULL($0)]) HiveTableScan(table=[[default, t2]], table:alias=[t2]) @@ -333,16 +333,16 @@ STAGE PLANS: predicate: a is not null (type: boolean) Statistics: Num rows: 9 Data size: 837 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: a (type: char(15)), ROW__IS__DELETED (type: boolean), (ROW__ID.writeid > 3L) (type: boolean) - outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 9 Data size: 909 Basic stats: COMPLETE Column stats: COMPLETE + expressions: a (type: char(15)), ROW__ID (type: struct), (ROW__IS__DELETED and (ROW__ID.writeid > 3L)) (type: boolean), ((ROW__ID.writeid > 3L) and (not ROW__IS__DELETED)) (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3 + Statistics: Num rows: 9 Data size: 1593 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: char(15)) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: char(15)) - Statistics: Num rows: 9 Data size: 909 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: boolean), _col2 (type: boolean) + Statistics: Num rows: 9 Data size: 1593 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: struct), _col2 (type: boolean), _col3 (type: boolean) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Map 7 @@ -357,16 +357,16 @@ STAGE PLANS: predicate: a is not null (type: boolean) Statistics: Num rows: 7 Data size: 671 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: a (type: char(15)), b (type: int), ROW__IS__DELETED (type: boolean), (ROW__ID.writeid > 3L) (type: boolean) - outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 7 Data size: 727 Basic stats: COMPLETE Column stats: COMPLETE + expressions: a (type: char(15)), b (type: int), ROW__ID (type: struct), (ROW__IS__DELETED and (ROW__ID.writeid > 3L)) (type: 
boolean), ((ROW__ID.writeid > 3L) and (not ROW__IS__DELETED)) (type: boolean) + outputColumnNames: _col0, _col1, _col2, _col3, _col4 + Statistics: Num rows: 7 Data size: 1259 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: char(15)) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: char(15)) - Statistics: Num rows: 7 Data size: 727 Basic stats: COMPLETE Column stats: COMPLETE - value expressions: _col1 (type: int), _col2 (type: boolean), _col3 (type: boolean) + Statistics: Num rows: 7 Data size: 1259 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: int), _col2 (type: struct), _col3 (type: boolean), _col4 (type: boolean) Execution mode: vectorized, llap LLAP IO: may be used (ACID table) Reducer 2 @@ -380,7 +380,7 @@ STAGE PLANS: 1 _col0 (type: char(15)) nullSafes: [true] outputColumnNames: _col1, _col2, _col3, _col4, _col5, _col6, _col7 - Statistics: Num rows: 5 Data size: 1017 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 2 Data size: 402 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator predicate: (_col3 and ((_col2 is null and (_col7 > 0L)) or (((_col7 + _col2) > 0) and _col2 is not null))) (type: boolean) Statistics: Num rows: 1 Data size: 205 Basic stats: COMPLETE Column stats: COMPLETE @@ -548,26 +548,26 @@ STAGE PLANS: keys: 0 _col0 (type: char(15)) 1 _col0 (type: char(15)) - outputColumnNames: _col0, _col1, _col2, _col3, _col5, _col6 - residual filter predicates: {(_col3 or _col6)} - Statistics: Num rows: 6 Data size: 670 Basic stats: COMPLETE Column stats: COMPLETE + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col6, _col7, _col8 + residual filter predicates: {(((not _col3) and (not _col7)) or ((not _col4) and (not _col8)))} {((_col2.writeid > 3L) or (_col6.writeid > 3L))} + Statistics: Num rows: 1 Data size: 265 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: char(15)), if((_col2 or _col5), (-1 * _col1), _col1) (type: int), if((_col2 or _col5), -1, 1) (type: int) + expressions: _col0 (type: char(15)), if((_col3 or _col7), (-1 * _col1), _col1) (type: int), if((_col3 or _col7), -1, 1) (type: int) outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 6 Data size: 670 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 265 Basic stats: COMPLETE Column stats: COMPLETE Group By Operator aggregations: sum(_col1), sum(_col2) keys: _col0 (type: char(15)) minReductionHashAggr: 0.4 mode: hash outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 5 Data size: 545 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 109 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: char(15)) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: char(15)) - Statistics: Num rows: 5 Data size: 545 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 109 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint), _col2 (type: bigint) Reducer 9 Execution mode: vectorized, llap @@ -577,13 +577,13 @@ STAGE PLANS: keys: KEY._col0 (type: char(15)) mode: mergepartial outputColumnNames: _col0, _col1, _col2 - Statistics: Num rows: 5 Data size: 545 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 109 Basic stats: COMPLETE Column stats: COMPLETE Reduce Output Operator key expressions: _col0 (type: 
char(15)) null sort order: z sort order: + Map-reduce partition columns: _col0 (type: char(15)) - Statistics: Num rows: 5 Data size: 545 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 109 Basic stats: COMPLETE Column stats: COMPLETE value expressions: _col1 (type: bigint), _col2 (type: bigint) Stage: Stage-5 @@ -672,10 +672,10 @@ POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 POSTHOOK: Output: default@mat1 POSTHOOK: Output: default@mat1 -POSTHOOK: Lineage: mat1._c1 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c1, type:bigint, comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:b, type:int, comment:null), ] -POSTHOOK: Lineage: mat1._c1 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c1, type:bigint, comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:b, type:int, comment:null), ] -POSTHOOK: Lineage: mat1._c2 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c2, type:bigint, comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), ] -POSTHOOK: Lineage: mat1._c2 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c2, type:bigint, comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), ] +POSTHOOK: Lineage: mat1._c1 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c1, type:bigint, comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:ROW__ID, type:struct, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__ID, type:struct, comment:), (t1)t1.FieldSchema(name:b, type:int, comment:null), ] +POSTHOOK: Lineage: mat1._c1 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c1, type:bigint, comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:ROW__ID, type:struct, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__ID, type:struct, comment:), (t1)t1.FieldSchema(name:b, type:int, comment:null), ] +POSTHOOK: Lineage: mat1._c2 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c2, type:bigint, comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:ROW__ID, type:struct, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: mat1._c2 EXPRESSION [(mat1)default.mat1.FieldSchema(name:_c2, type:bigint, comment:null), (t1)t1.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t1)t1.FieldSchema(name:ROW__ID, type:struct, comment:), (t2)t2.FieldSchema(name:ROW__IS__DELETED, type:boolean, comment:), (t2)t2.FieldSchema(name:ROW__ID, type:struct, comment:), ] POSTHOOK: Lineage: mat1.a SIMPLE [(t1)t1.FieldSchema(name:a, type:char(15), comment:null), ] POSTHOOK: Lineage: mat1.a SIMPLE [(t1)t1.FieldSchema(name:a, type:char(15), comment:null), ] PREHOOK: query: explain cbo diff --git a/ql/src/test/results/clientnegative/materialized_view_no_cbo_rewrite_2.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_no_cbo_rewrite.q.out similarity index 87% rename from 
ql/src/test/results/clientnegative/materialized_view_no_cbo_rewrite_2.q.out rename to ql/src/test/results/clientpositive/llap/materialized_view_no_cbo_rewrite.q.out index 9083c736d640..4cba8933ab55 100644 --- a/ql/src/test/results/clientnegative/materialized_view_no_cbo_rewrite_2.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_no_cbo_rewrite.q.out @@ -36,4 +36,8 @@ PREHOOK: query: alter materialized view cmv_mat_view enable rewrite PREHOOK: type: ALTER_MATERIALIZED_VIEW_REWRITE PREHOOK: Input: default@cmv_mat_view PREHOOK: Output: default@cmv_mat_view -FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Cannot enable rewriting for materialized view. Statement has unsupported clause: sort by. +Only query text based automatic rewriting is available for materialized view. Statement has unsupported clause: sort by. +POSTHOOK: query: alter materialized view cmv_mat_view enable rewrite +POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REWRITE +POSTHOOK: Input: default@cmv_mat_view +POSTHOOK: Output: default@cmv_mat_view diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_by_text_10.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_by_text_10.q.out new file mode 100644 index 000000000000..3455a1fe9ee6 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_by_text_10.q.out @@ -0,0 +1,40 @@ +PREHOOK: query: CREATE TABLE EMPS (ENAME STRING, BIRTH_EPOCH_SECS INT) STORED AS ORC TBLPROPERTIES ('transactional'='true') +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@EMPS +POSTHOOK: query: CREATE TABLE EMPS (ENAME STRING, BIRTH_EPOCH_SECS INT) STORED AS ORC TBLPROPERTIES ('transactional'='true') +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@EMPS +unix_timestamp(void) is deprecated. Use current_timestamp instead. +unix_timestamp(void) is deprecated. Use current_timestamp instead. +Cannot enable automatic rewriting for materialized view. UNIX_TIMESTAMP is not a deterministic function +PREHOOK: query: CREATE MATERIALIZED VIEW v_emp AS SELECT * FROM EMPS WHERE BIRTH_EPOCH_SECS <= UNIX_TIMESTAMP() +PREHOOK: type: CREATE_MATERIALIZED_VIEW +PREHOOK: Input: default@emps +PREHOOK: Output: database:default +PREHOOK: Output: default@v_emp +POSTHOOK: query: CREATE MATERIALIZED VIEW v_emp AS SELECT * FROM EMPS WHERE BIRTH_EPOCH_SECS <= UNIX_TIMESTAMP() +POSTHOOK: type: CREATE_MATERIALIZED_VIEW +POSTHOOK: Input: default@emps +POSTHOOK: Output: database:default +POSTHOOK: Output: default@v_emp +POSTHOOK: Lineage: v_emp.birth_epoch_secs SIMPLE [(emps)emps.FieldSchema(name:birth_epoch_secs, type:int, comment:null), ] +POSTHOOK: Lineage: v_emp.ename SIMPLE [(emps)emps.FieldSchema(name:ename, type:string, comment:null), ] +unix_timestamp(void) is deprecated. Use current_timestamp instead. +unix_timestamp(void) is deprecated. Use current_timestamp instead. 
+PREHOOK: query: explain cbo +SELECT * FROM EMPS WHERE BIRTH_EPOCH_SECS <= UNIX_TIMESTAMP() +PREHOOK: type: QUERY +PREHOOK: Input: default@emps +#### A masked pattern was here #### +POSTHOOK: query: explain cbo +SELECT * FROM EMPS WHERE BIRTH_EPOCH_SECS <= UNIX_TIMESTAMP() +POSTHOOK: type: QUERY +POSTHOOK: Input: default@emps +#### A masked pattern was here #### +CBO PLAN: +HiveProject(ename=[$0], birth_epoch_secs=[$1]) + HiveFilter(condition=[<=(CAST($1):BIGINT, UNIX_TIMESTAMP())]) + HiveTableScan(table=[[default, emps]], table:alias=[emps]) + diff --git a/ql/src/test/results/clientnegative/materialized_view_no_supported_op_rewrite_2.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_by_text_11.q.out similarity index 87% rename from ql/src/test/results/clientnegative/materialized_view_no_supported_op_rewrite_2.q.out rename to ql/src/test/results/clientpositive/llap/materialized_view_rewrite_by_text_11.q.out index bb771614eec0..bd12d44dc3b9 100644 --- a/ql/src/test/results/clientnegative/materialized_view_no_supported_op_rewrite_2.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_by_text_11.q.out @@ -35,4 +35,8 @@ PREHOOK: query: alter materialized view cmv_mat_view enable rewrite PREHOOK: type: ALTER_MATERIALIZED_VIEW_REWRITE PREHOOK: Input: default@cmv_mat_view PREHOOK: Output: default@cmv_mat_view -FAILED: Execution Error, return code 40000 from org.apache.hadoop.hive.ql.ddl.DDLTask. org.apache.hadoop.hive.ql.metadata.HiveException: Cannot enable rewriting for materialized view. LEFT join type is not supported by rewriting algorithm. +Only query text based automatic rewriting is available for materialized view. Statement has unsupported join type: LEFT. +POSTHOOK: query: alter materialized view cmv_mat_view enable rewrite +POSTHOOK: type: ALTER_MATERIALIZED_VIEW_REWRITE +POSTHOOK: Input: default@cmv_mat_view +POSTHOOK: Output: default@cmv_mat_view diff --git a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_by_text_8.q.out b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_by_text_8.q.out index fa31a16a2650..e003990db64a 100644 --- a/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_by_text_8.q.out +++ b/ql/src/test/results/clientpositive/llap/materialized_view_rewrite_by_text_8.q.out @@ -8,7 +8,7 @@ POSTHOOK: query: create table t1(col0 int) STORED AS ORC POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default POSTHOOK: Output: default@t1 -Only query text based automatic rewriting is available for materialized view. LEFT join type is not supported by rewriting algorithm. +Only query text based automatic rewriting is available for materialized view. Statement has unsupported join type: LEFT. Warning: Shuffle Join MERGEJOIN[23][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product PREHOOK: query: create materialized view mat1 as select l.col0 from t1 l left outer join t1 r on (l.col0 = r.col0) where l.col0 = 20 @@ -23,7 +23,7 @@ POSTHOOK: Input: default@t1 POSTHOOK: Output: database:default POSTHOOK: Output: default@mat1 POSTHOOK: Lineage: mat1.col0 SIMPLE [] -Only query text based automatic rewriting is available for materialized view. LEFT join type is not supported by rewriting algorithm. +Only query text based automatic rewriting is available for materialized view. Statement has unsupported join type: LEFT. 
Warning: Shuffle Join MERGEJOIN[23][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product PREHOOK: query: create materialized view mat2 as select col0 from diff --git a/ql/src/test/results/clientpositive/llap/not_in_scenarios.q.out b/ql/src/test/results/clientpositive/llap/not_in_scenarios.q.out new file mode 100644 index 000000000000..3ca9fa6edd6a --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/not_in_scenarios.q.out @@ -0,0 +1,77 @@ +PREHOOK: query: CREATE TABLE my_tbl (id int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@my_tbl +POSTHOOK: query: CREATE TABLE my_tbl (id int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@my_tbl +PREHOOK: query: insert into my_tbl values (100),(200),(300) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@my_tbl +POSTHOOK: query: insert into my_tbl values (100),(200),(300) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@my_tbl +POSTHOOK: Lineage: my_tbl.id SCRIPT [] +PREHOOK: query: select * from my_tbl where id not in ('ABC', 'DEF') +PREHOOK: type: QUERY +PREHOOK: Input: default@my_tbl +#### A masked pattern was here #### +POSTHOOK: query: select * from my_tbl where id not in ('ABC', 'DEF') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@my_tbl +#### A masked pattern was here #### +100 +200 +300 +PREHOOK: query: select * from my_tbl where id not in ('ABC', 'DEF', '123') +PREHOOK: type: QUERY +PREHOOK: Input: default@my_tbl +#### A masked pattern was here #### +POSTHOOK: query: select * from my_tbl where id not in ('ABC', 'DEF', '123') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@my_tbl +#### A masked pattern was here #### +100 +200 +300 +PREHOOK: query: select * from my_tbl where id not in ('ABC', 'DEF', '100') +PREHOOK: type: QUERY +PREHOOK: Input: default@my_tbl +#### A masked pattern was here #### +POSTHOOK: query: select * from my_tbl where id not in ('ABC', 'DEF', '100') +POSTHOOK: type: QUERY +POSTHOOK: Input: default@my_tbl +#### A masked pattern was here #### +200 +300 +PREHOOK: query: select * from my_tbl where id not in (100, 'ABC', 200) +PREHOOK: type: QUERY +PREHOOK: Input: default@my_tbl +#### A masked pattern was here #### +POSTHOOK: query: select * from my_tbl where id not in (100, 'ABC', 200) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@my_tbl +#### A masked pattern was here #### +300 +PREHOOK: query: select * from my_tbl where id is not null or id in ("ABC") +PREHOOK: type: QUERY +PREHOOK: Input: default@my_tbl +#### A masked pattern was here #### +POSTHOOK: query: select * from my_tbl where id is not null or id in ("ABC") +POSTHOOK: type: QUERY +POSTHOOK: Input: default@my_tbl +#### A masked pattern was here #### +100 +200 +300 +PREHOOK: query: select * from my_tbl where id is not null and id in ("ABC") +PREHOOK: type: QUERY +PREHOOK: Input: default@my_tbl +#### A masked pattern was here #### +POSTHOOK: query: select * from my_tbl where id is not null and id in ("ABC") +POSTHOOK: type: QUERY +POSTHOOK: Input: default@my_tbl +#### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out b/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out index 7d28a27438fd..49210d080bb1 100644 --- a/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out +++ b/ql/src/test/results/clientpositive/llap/orc_llap_counters.q.out @@ -922,12 +922,12 @@ Stage-1 HIVE COUNTERS: 
RECORDS_OUT_0: 1 RECORDS_OUT_INTERMEDIATE_Map_1: 0 RECORDS_OUT_INTERMEDIATE_Reducer_2: 0 - RECORDS_OUT_OPERATOR_FIL_7: 0 - RECORDS_OUT_OPERATOR_FS_9: 1 - RECORDS_OUT_OPERATOR_GBY_8: 1 + RECORDS_OUT_OPERATOR_FIL_8: 0 + RECORDS_OUT_OPERATOR_FS_12: 1 + RECORDS_OUT_OPERATOR_GBY_11: 1 RECORDS_OUT_OPERATOR_MAP_0: 0 - RECORDS_OUT_OPERATOR_RS_3: 0 - RECORDS_OUT_OPERATOR_SEL_2: 0 + RECORDS_OUT_OPERATOR_RS_10: 0 + RECORDS_OUT_OPERATOR_SEL_9: 0 RECORDS_OUT_OPERATOR_TS_0: 0 TOTAL_TABLE_ROWS_WRITTEN: 0 Stage-1 LLAP IO COUNTERS: diff --git a/ql/src/test/results/clientpositive/llap/partition_timestamp3.q.out b/ql/src/test/results/clientpositive/llap/partition_timestamp3.q.out new file mode 100644 index 000000000000..847ec070fabd --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/partition_timestamp3.q.out @@ -0,0 +1,48 @@ +PREHOOK: query: DROP TABLE IF EXISTS payments +PREHOOK: type: DROPTABLE +PREHOOK: Output: database:default +POSTHOOK: query: DROP TABLE IF EXISTS payments +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: database:default +PREHOOK: query: CREATE EXTERNAL TABLE payments (card string) PARTITIONED BY(txn_datetime TIMESTAMP) STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@payments +POSTHOOK: query: CREATE EXTERNAL TABLE payments (card string) PARTITIONED BY(txn_datetime TIMESTAMP) STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@payments +PREHOOK: query: INSERT into payments VALUES('3333-4444-2222-9999', '2023-03-26 02:30:00'), ('3333-4444-2222-9999', '2023-03-26 03:30:00') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@payments +POSTHOOK: query: INSERT into payments VALUES('3333-4444-2222-9999', '2023-03-26 02:30:00'), ('3333-4444-2222-9999', '2023-03-26 03:30:00') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@payments +POSTHOOK: Output: default@payments@txn_datetime=2023-03-26 02%3A30%3A00 +POSTHOOK: Output: default@payments@txn_datetime=2023-03-26 03%3A30%3A00 +POSTHOOK: Lineage: payments PARTITION(txn_datetime=2023-03-26 02:30:00).card SCRIPT [] +POSTHOOK: Lineage: payments PARTITION(txn_datetime=2023-03-26 03:30:00).card SCRIPT [] +PREHOOK: query: SELECT * FROM payments WHERE txn_datetime = '2023-03-26 02:30:00' +PREHOOK: type: QUERY +PREHOOK: Input: default@payments +PREHOOK: Input: default@payments@txn_datetime=2023-03-26 02%3A30%3A00 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM payments WHERE txn_datetime = '2023-03-26 02:30:00' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@payments +POSTHOOK: Input: default@payments@txn_datetime=2023-03-26 02%3A30%3A00 +#### A masked pattern was here #### +3333-4444-2222-9999 2023-03-26 02:30:00 +PREHOOK: query: SELECT * FROM payments WHERE txn_datetime = '2023-03-26 03:30:00' +PREHOOK: type: QUERY +PREHOOK: Input: default@payments +PREHOOK: Input: default@payments@txn_datetime=2023-03-26 03%3A30%3A00 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM payments WHERE txn_datetime = '2023-03-26 03:30:00' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@payments +POSTHOOK: Input: default@payments@txn_datetime=2023-03-26 03%3A30%3A00 +#### A masked pattern was here #### +3333-4444-2222-9999 2023-03-26 03:30:00 diff --git a/ql/src/test/results/clientpositive/llap/ppd_transform.q.out b/ql/src/test/results/clientpositive/llap/ppd_transform.q.out index 9a0f487d0f66..1505011626c8 100644 --- 
a/ql/src/test/results/clientpositive/llap/ppd_transform.q.out +++ b/ql/src/test/results/clientpositive/llap/ppd_transform.q.out @@ -384,8 +384,7 @@ FROM ( PREHOOK: type: QUERY PREHOOK: Input: cat PREHOOK: Input: default@src -PREHOOK: Output: hdfs://### HDFS PATH ### -PREHOOK: Output: hdfs://### HDFS PATH ### +#### A masked pattern was here #### POSTHOOK: query: EXPLAIN FROM ( FROM ( SELECT * FROM src ) mapout REDUCE * USING 'cat' AS x,y @@ -394,8 +393,7 @@ FROM ( POSTHOOK: type: QUERY POSTHOOK: Input: cat POSTHOOK: Input: default@src -POSTHOOK: Output: hdfs://### HDFS PATH ### -POSTHOOK: Output: hdfs://### HDFS PATH ### +#### A masked pattern was here #### STAGE DEPENDENCIES: Stage-2 is a root stage Stage-3 depends on stages: Stage-2 diff --git a/ql/src/test/results/clientpositive/llap/ptf_register_use.q.out b/ql/src/test/results/clientpositive/llap/ptf_register_use.q.out new file mode 100644 index 000000000000..3a7884306612 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/ptf_register_use.q.out @@ -0,0 +1,84 @@ +PREHOOK: query: explain select ptf_register_use_func() over() +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: explain select ptf_register_use_func() over() +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: 0 (type: int) + null sort order: a + sort order: + + Map-reduce partition columns: 0 (type: int) + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: llap + LLAP IO: no inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + PTF Operator + Function definitions: + Input definition + input alias: ptf_0 + output shape: + type: WINDOWING + Windowing table definition + input alias: ptf_1 + name: windowingtablefunction + order by: 0 ASC NULLS FIRST + partition by: 0 + raw input shape: + window functions: + window function definition + alias: ptf_register_use_func_window_0 + name: ptf_register_use_func + window function: GenericUDAFRowNumberEvaluator + window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) + isPivotResult: true + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: ptf_register_use_func_window_0 (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select ptf_register_use_func() over() +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: select ptf_register_use_func() over() +POSTHOOK: type: 
QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +1 diff --git a/ql/src/test/results/clientpositive/llap/rename_table.q.out b/ql/src/test/results/clientpositive/llap/rename_table.q.out new file mode 100644 index 000000000000..014d629ea868 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/rename_table.q.out @@ -0,0 +1,378 @@ +PREHOOK: query: CREATE TABLE rename_partition_table0 (key STRING, value STRING) PARTITIONED BY (part STRING) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@rename_partition_table0 +POSTHOOK: query: CREATE TABLE rename_partition_table0 (key STRING, value STRING) PARTITIONED BY (part STRING) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@rename_partition_table0 +PREHOOK: query: INSERT OVERWRITE TABLE rename_partition_table0 PARTITION (part = '1') SELECT * FROM src where rand(1) < 0.5 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@rename_partition_table0@part=1 +POSTHOOK: query: INSERT OVERWRITE TABLE rename_partition_table0 PARTITION (part = '1') SELECT * FROM src where rand(1) < 0.5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@rename_partition_table0@part=1 +POSTHOOK: Lineage: rename_partition_table0 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: rename_partition_table0 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: ALTER TABLE rename_partition_table0 ADD COLUMNS (new_col INT) +PREHOOK: type: ALTERTABLE_ADDCOLS +PREHOOK: Input: default@rename_partition_table0 +PREHOOK: Output: default@rename_partition_table0 +POSTHOOK: query: ALTER TABLE rename_partition_table0 ADD COLUMNS (new_col INT) +POSTHOOK: type: ALTERTABLE_ADDCOLS +POSTHOOK: Input: default@rename_partition_table0 +POSTHOOK: Output: default@rename_partition_table0 +PREHOOK: query: INSERT OVERWRITE TABLE rename_partition_table0 PARTITION (part = '2') SELECT src.*, 1 FROM src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@rename_partition_table0@part=2 +POSTHOOK: query: INSERT OVERWRITE TABLE rename_partition_table0 PARTITION (part = '2') SELECT src.*, 1 FROM src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@rename_partition_table0@part=2 +POSTHOOK: Lineage: rename_partition_table0 PARTITION(part=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: rename_partition_table0 PARTITION(part=2).new_col SIMPLE [] +POSTHOOK: Lineage: rename_partition_table0 PARTITION(part=2).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: ALTER TABLE rename_partition_table0 RENAME TO rename_partition_table1 +PREHOOK: type: ALTERTABLE_RENAME +PREHOOK: Input: default@rename_partition_table0 +PREHOOK: Output: database:default +PREHOOK: Output: default@rename_partition_table0 +PREHOOK: Output: default@rename_partition_table1 +POSTHOOK: query: ALTER TABLE rename_partition_table0 RENAME TO rename_partition_table1 +POSTHOOK: type: ALTERTABLE_RENAME +POSTHOOK: Input: default@rename_partition_table0 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@rename_partition_table0 +POSTHOOK: Output: default@rename_partition_table1 +PREHOOK: query: DESCRIBE FORMATTED rename_partition_table1 +PREHOOK: type: DESCTABLE +PREHOOK: 
Input: default@rename_partition_table1 +POSTHOOK: query: DESCRIBE FORMATTED rename_partition_table1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@rename_partition_table1 +# col_name data_type comment +key string +value string +new_col int + +# Partition Information +# col_name data_type comment +part string + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: MANAGED_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + bucketing_version 2 +#### A masked pattern was here #### + numFiles 2 + numPartitions 2 + numRows 746 + rawDataSize 133296 + totalSize 4760 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: DESCRIBE FORMATTED rename_partition_table1 PARTITION (part='1') key +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@rename_partition_table1 +POSTHOOK: query: DESCRIBE FORMATTED rename_partition_table1 PARTITION (part='1') key +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@rename_partition_table1 +col_name key +data_type string +min +max +num_nulls 0 +distinct_count 198 +avg_col_len 2.8089430894308944 +max_col_len 3 +num_trues +num_falses +bit_vector HL +histogram +comment from deserializer +PREHOOK: query: DESCRIBE FORMATTED rename_partition_table1 PARTITION (part='1') value +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@rename_partition_table1 +POSTHOOK: query: DESCRIBE FORMATTED rename_partition_table1 PARTITION (part='1') value +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@rename_partition_table1 +col_name value +data_type string +min +max +num_nulls 0 +distinct_count 191 +avg_col_len 6.808943089430894 +max_col_len 7 +num_trues +num_falses +bit_vector HL +histogram +comment from deserializer +PREHOOK: query: DESCRIBE FORMATTED rename_partition_table1 PARTITION (part='2') key +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@rename_partition_table1 +POSTHOOK: query: DESCRIBE FORMATTED rename_partition_table1 PARTITION (part='2') key +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@rename_partition_table1 +col_name key +data_type string +min +max +num_nulls 0 +distinct_count 316 +avg_col_len 2.812 +max_col_len 3 +num_trues +num_falses +bit_vector HL +histogram +comment from deserializer +PREHOOK: query: DESCRIBE FORMATTED rename_partition_table1 PARTITION (part='2') value +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@rename_partition_table1 +POSTHOOK: query: DESCRIBE FORMATTED rename_partition_table1 PARTITION (part='2') value +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@rename_partition_table1 +col_name value +data_type string +min +max +num_nulls 0 +distinct_count 307 +avg_col_len 6.812 +max_col_len 7 +num_trues +num_falses +bit_vector HL +histogram +comment from deserializer +PREHOOK: query: DESCRIBE FORMATTED rename_partition_table1 PARTITION (part='2') new_col +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@rename_partition_table1 +POSTHOOK: query: DESCRIBE FORMATTED rename_partition_table1 PARTITION (part='2') new_col +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@rename_partition_table1 +col_name new_col +data_type int +min 1 +max 1 +num_nulls 0 +distinct_count 1 +avg_col_len 
+max_col_len +num_trues +num_falses +bit_vector HL +histogram Q1: 1, Q2: 1, Q3: 1 +comment from deserializer +PREHOOK: query: CREATE EXTERNAL TABLE rename_partition_table_ext0 (key STRING, value STRING) PARTITIONED BY (part STRING) +STORED AS ORC +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@rename_partition_table_ext0 +POSTHOOK: query: CREATE EXTERNAL TABLE rename_partition_table_ext0 (key STRING, value STRING) PARTITIONED BY (part STRING) +STORED AS ORC +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@rename_partition_table_ext0 +PREHOOK: query: INSERT OVERWRITE TABLE rename_partition_table_ext0 PARTITION (part = '1') SELECT * FROM src where rand(1) < 0.5 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@rename_partition_table_ext0@part=1 +POSTHOOK: query: INSERT OVERWRITE TABLE rename_partition_table_ext0 PARTITION (part = '1') SELECT * FROM src where rand(1) < 0.5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@rename_partition_table_ext0@part=1 +POSTHOOK: Lineage: rename_partition_table_ext0 PARTITION(part=1).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: rename_partition_table_ext0 PARTITION(part=1).value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: ALTER TABLE rename_partition_table_ext0 CHANGE COLUMN value val STRING CASCADE +PREHOOK: type: ALTERTABLE_RENAMECOL +PREHOOK: Input: default@rename_partition_table_ext0 +PREHOOK: Output: default@rename_partition_table_ext0 +PREHOOK: Output: default@rename_partition_table_ext0@part=1 +POSTHOOK: query: ALTER TABLE rename_partition_table_ext0 CHANGE COLUMN value val STRING CASCADE +POSTHOOK: type: ALTERTABLE_RENAMECOL +POSTHOOK: Input: default@rename_partition_table_ext0 +POSTHOOK: Output: default@rename_partition_table_ext0 +POSTHOOK: Output: default@rename_partition_table_ext0@part=1 +PREHOOK: query: INSERT OVERWRITE TABLE rename_partition_table_ext0 PARTITION (part = '2') SELECT * FROM src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@rename_partition_table_ext0@part=2 +POSTHOOK: query: INSERT OVERWRITE TABLE rename_partition_table_ext0 PARTITION (part = '2') SELECT * FROM src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@rename_partition_table_ext0@part=2 +POSTHOOK: Lineage: rename_partition_table_ext0 PARTITION(part=2).key SIMPLE [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: rename_partition_table_ext0 PARTITION(part=2).val SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: ALTER TABLE rename_partition_table_ext0 RENAME TO rename_partition_table_ext1 +PREHOOK: type: ALTERTABLE_RENAME +PREHOOK: Input: default@rename_partition_table_ext0 +PREHOOK: Output: database:default +PREHOOK: Output: default@rename_partition_table_ext0 +PREHOOK: Output: default@rename_partition_table_ext1 +POSTHOOK: query: ALTER TABLE rename_partition_table_ext0 RENAME TO rename_partition_table_ext1 +POSTHOOK: type: ALTERTABLE_RENAME +POSTHOOK: Input: default@rename_partition_table_ext0 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@rename_partition_table_ext0 +POSTHOOK: Output: default@rename_partition_table_ext1 +PREHOOK: query: DESCRIBE FORMATTED rename_partition_table_ext1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@rename_partition_table_ext1 +POSTHOOK: query: 
DESCRIBE FORMATTED rename_partition_table_ext1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@rename_partition_table_ext1 +# col_name data_type comment +key string +val string + +# Partition Information +# col_name data_type comment +part string + +# Detailed Table Information +Database: default +#### A masked pattern was here #### +Retention: 0 +#### A masked pattern was here #### +Table Type: EXTERNAL_TABLE +Table Parameters: + COLUMN_STATS_ACCURATE {\"BASIC_STATS\":\"true\"} + EXTERNAL TRUE + bucketing_version 2 +#### A masked pattern was here #### + numFiles 2 + numPartitions 2 + numRows 746 + rawDataSize 131296 + totalSize 4669 +#### A masked pattern was here #### + +# Storage Information +SerDe Library: org.apache.hadoop.hive.ql.io.orc.OrcSerde +InputFormat: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat +OutputFormat: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat +Compressed: No +Num Buckets: -1 +Bucket Columns: [] +Sort Columns: [] +Storage Desc Params: + serialization.format 1 +PREHOOK: query: DESCRIBE FORMATTED rename_partition_table_ext1 PARTITION (part='1') key +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@rename_partition_table_ext1 +POSTHOOK: query: DESCRIBE FORMATTED rename_partition_table_ext1 PARTITION (part='1') key +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@rename_partition_table_ext1 +col_name key +data_type string +min +max +num_nulls 0 +distinct_count 198 +avg_col_len 2.8089430894308944 +max_col_len 3 +num_trues +num_falses +bit_vector HL +histogram +comment from deserializer +PREHOOK: query: DESCRIBE FORMATTED rename_partition_table_ext1 PARTITION (part='1') val +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@rename_partition_table_ext1 +POSTHOOK: query: DESCRIBE FORMATTED rename_partition_table_ext1 PARTITION (part='1') val +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@rename_partition_table_ext1 +col_name val +data_type string +min +max +num_nulls +distinct_count +avg_col_len +max_col_len +num_trues +num_falses +bit_vector +histogram +comment from deserializer +PREHOOK: query: DESCRIBE FORMATTED rename_partition_table_ext1 PARTITION (part='2') key +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@rename_partition_table_ext1 +POSTHOOK: query: DESCRIBE FORMATTED rename_partition_table_ext1 PARTITION (part='2') key +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@rename_partition_table_ext1 +col_name key +data_type string +min +max +num_nulls 0 +distinct_count 316 +avg_col_len 2.812 +max_col_len 3 +num_trues +num_falses +bit_vector HL +histogram +comment from deserializer +PREHOOK: query: DESCRIBE FORMATTED rename_partition_table_ext1 PARTITION (part='2') val +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@rename_partition_table_ext1 +POSTHOOK: query: DESCRIBE FORMATTED rename_partition_table_ext1 PARTITION (part='2') val +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@rename_partition_table_ext1 +col_name val +data_type string +min +max +num_nulls 0 +distinct_count 307 +avg_col_len 6.812 +max_col_len 7 +num_trues +num_falses +bit_vector HL +histogram +comment from deserializer +PREHOOK: query: DROP TABLE rename_partition_table1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@rename_partition_table1 +PREHOOK: Output: database:default +PREHOOK: Output: default@rename_partition_table1 +POSTHOOK: query: DROP TABLE rename_partition_table1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@rename_partition_table1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@rename_partition_table1 +PREHOOK: query: DROP 
TABLE rename_partition_table_ext1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@rename_partition_table_ext1 +PREHOOK: Output: database:default +PREHOOK: Output: default@rename_partition_table_ext1 +POSTHOOK: query: DROP TABLE rename_partition_table_ext1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@rename_partition_table_ext1 +POSTHOOK: Output: database:default +POSTHOOK: Output: default@rename_partition_table_ext1 diff --git a/ql/src/test/results/clientpositive/llap/schemeAuthority.q.out b/ql/src/test/results/clientpositive/llap/schemeAuthority.q.out index 009424ba8d23..47d458fefb7c 100644 --- a/ql/src/test/results/clientpositive/llap/schemeAuthority.q.out +++ b/ql/src/test/results/clientpositive/llap/schemeAuthority.q.out @@ -9,11 +9,9 @@ POSTHOOK: Output: default@dynPart #### A masked pattern was here #### PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: hdfs://### HDFS PATH ### #### A masked pattern was here #### POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: hdfs://### HDFS PATH ### #### A masked pattern was here #### PREHOOK: type: QUERY PREHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/llap/schemeAuthority2.q.out b/ql/src/test/results/clientpositive/llap/schemeAuthority2.q.out index d3731f70e670..9009cd296714 100644 --- a/ql/src/test/results/clientpositive/llap/schemeAuthority2.q.out +++ b/ql/src/test/results/clientpositive/llap/schemeAuthority2.q.out @@ -9,11 +9,9 @@ POSTHOOK: Output: default@dynPart_n0 #### A masked pattern was here #### PREHOOK: type: QUERY PREHOOK: Input: default@src -PREHOOK: Output: hdfs://### HDFS PATH ### #### A masked pattern was here #### POSTHOOK: type: QUERY POSTHOOK: Input: default@src -POSTHOOK: Output: hdfs://### HDFS PATH ### #### A masked pattern was here #### PREHOOK: type: QUERY PREHOOK: Input: default@src diff --git a/ql/src/test/results/clientpositive/llap/sharedwork_virtualcol_schema_merge.q.out b/ql/src/test/results/clientpositive/llap/sharedwork_virtualcol_schema_merge.q.out new file mode 100644 index 000000000000..0f42feefbde1 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/sharedwork_virtualcol_schema_merge.q.out @@ -0,0 +1,432 @@ +PREHOOK: query: create table t1(a int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 +POSTHOOK: query: create table t1(a int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t1 +PREHOOK: query: explain +WITH t AS ( + select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a from ( + select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a, row_number() OVER (partition by INPUT__FILE__NAME) rn from t1 + where a = 1 + ) q + where rn=1 +) +select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a from t1 where NOT (a = 1) AND INPUT__FILE__NAME IN (select INPUT__FILE__NAME from t) +union all +select * from t +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: explain +WITH t AS ( + select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a from ( + select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a, row_number() OVER (partition by INPUT__FILE__NAME) rn from t1 + where a = 1 + ) q + where rn=1 +) +select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a from t1 where NOT (a = 1) AND INPUT__FILE__NAME IN (select INPUT__FILE__NAME from t) +union all +select * from t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a 
root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 5 (SIMPLE_EDGE), Union 3 (CONTAINS) + Reducer 5 <- Map 4 (SIMPLE_EDGE) + Reducer 7 <- Map 6 (SIMPLE_EDGE), Union 3 (CONTAINS) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t1 + filterExpr: ((a <> 1) and INPUT__FILE__NAME is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((a <> 1) and INPUT__FILE__NAME is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: a (type: int), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col2 (type: string) + null sort order: z + sort order: + + Map-reduce partition columns: _col2 (type: string) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: int), _col1 (type: bigint) + Execution mode: llap + LLAP IO: all inputs + Map 4 + Map Operator Tree: + TableScan + alias: t1 + filterExpr: ((a = 1) and INPUT__FILE__NAME is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((a = 1) and INPUT__FILE__NAME is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: INPUT__FILE__NAME (type: string) + null sort order: a + sort order: + + Map-reduce partition columns: INPUT__FILE__NAME (type: string) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Execution mode: llap + LLAP IO: all inputs + Map 6 + Map Operator Tree: + TableScan + alias: t1 + filterExpr: (a = 1) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a = 1) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: INPUT__FILE__NAME (type: string) + null sort order: a + sort order: + + Map-reduce partition columns: INPUT__FILE__NAME (type: string) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + value expressions: BLOCK__OFFSET__INSIDE__FILE (type: bigint) + Execution mode: llap + LLAP IO: all inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Semi Join 0 to 1 + keys: + 0 _col2 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: bigint), _col2 (type: string), _col0 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 5 + Execution mode: 
vectorized, llap + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string) + outputColumnNames: _col2 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + PTF Operator + Function definitions: + Input definition + input alias: ptf_0 + output shape: _col2: string + type: WINDOWING + Windowing table definition + input alias: ptf_1 + name: windowingtablefunction + order by: _col2 ASC NULLS FIRST + partition by: _col2 + raw input shape: + window functions: + window function definition + alias: row_number_window_0 + name: row_number + window function: GenericUDAFRowNumberEvaluator + window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) + isPivotResult: true + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (row_number_window_0 = 1) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col2 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + minReductionHashAggr: 0.99 + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Reducer 7 + Execution mode: vectorized, llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col1 (type: bigint), KEY.reducesinkkey0 (type: string) + outputColumnNames: _col1, _col2 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + PTF Operator + Function definitions: + Input definition + input alias: ptf_0 + type: WINDOWING + Windowing table definition + input alias: ptf_1 + name: windowingtablefunction + order by: _col2 ASC NULLS FIRST + partition by: _col2 + raw input shape: + window functions: + window function definition + alias: row_number_window_0 + name: row_number + window function: GenericUDAFRowNumberEvaluator + window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) + isPivotResult: true + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (row_number_window_0 = 1) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: bigint), _col2 (type: string), 1 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Union 3 + Vertex: Union 3 + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: explain +WITH t AS ( + select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a from ( + select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a, row_number() OVER (partition by INPUT__FILE__NAME) rn from t1 + where a = 1 + ) q + where rn=1 +) +select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a from t1 where NOT (a = 1) AND INPUT__FILE__NAME IN 
(select INPUT__FILE__NAME from t) +union all +select * from t +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +#### A masked pattern was here #### +POSTHOOK: query: explain +WITH t AS ( + select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a from ( + select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a, row_number() OVER (partition by INPUT__FILE__NAME) rn from t1 + where a = 1 + ) q + where rn=1 +) +select BLOCK__OFFSET__INSIDE__FILE, INPUT__FILE__NAME, a from t1 where NOT (a = 1) AND INPUT__FILE__NAME IN (select INPUT__FILE__NAME from t) +union all +select * from t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Reducer 2 <- Map 1 (SIMPLE_EDGE), Reducer 4 (SIMPLE_EDGE), Union 3 (CONTAINS) + Reducer 4 <- Map 1 (SIMPLE_EDGE) + Reducer 6 <- Map 5 (SIMPLE_EDGE), Union 3 (CONTAINS) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: t1 + filterExpr: (((a <> 1) and INPUT__FILE__NAME is not null) or ((a = 1) and INPUT__FILE__NAME is not null)) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((a <> 1) and INPUT__FILE__NAME is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: a (type: int), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col2 (type: string) + null sort order: z + sort order: + + Map-reduce partition columns: _col2 (type: string) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: int), _col1 (type: bigint) + Filter Operator + predicate: ((a = 1) and INPUT__FILE__NAME is not null) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: INPUT__FILE__NAME (type: string) + null sort order: a + sort order: + + Map-reduce partition columns: INPUT__FILE__NAME (type: string) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Execution mode: llap + LLAP IO: all inputs + Map 5 + Map Operator Tree: + TableScan + alias: t1 + filterExpr: (a = 1) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (a = 1) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: INPUT__FILE__NAME (type: string) + null sort order: a + sort order: + + Map-reduce partition columns: INPUT__FILE__NAME (type: string) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + value expressions: BLOCK__OFFSET__INSIDE__FILE (type: bigint) + Execution mode: llap + LLAP IO: all inputs + Reducer 2 + Execution mode: llap + Reduce Operator Tree: + Merge Join Operator + condition map: + Left Semi Join 0 to 1 + keys: + 0 _col2 (type: string) + 1 _col0 (type: string) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: bigint), _col2 (type: 
string), _col0 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Reducer 4 + Execution mode: vectorized, llap + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string) + outputColumnNames: _col2 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + PTF Operator + Function definitions: + Input definition + input alias: ptf_0 + output shape: _col2: string + type: WINDOWING + Windowing table definition + input alias: ptf_1 + name: windowingtablefunction + order by: _col2 ASC NULLS FIRST + partition by: _col2 + raw input shape: + window functions: + window function definition + alias: row_number_window_0 + name: row_number + window function: GenericUDAFRowNumberEvaluator + window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) + isPivotResult: true + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (row_number_window_0 = 1) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col2 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Group By Operator + keys: _col0 (type: string) + minReductionHashAggr: 0.99 + mode: hash + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Reducer 6 + Execution mode: vectorized, llap + Reduce Operator Tree: + Select Operator + expressions: VALUE._col1 (type: bigint), KEY.reducesinkkey0 (type: string) + outputColumnNames: _col1, _col2 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + PTF Operator + Function definitions: + Input definition + input alias: ptf_0 + type: WINDOWING + Windowing table definition + input alias: ptf_1 + name: windowingtablefunction + order by: _col2 ASC NULLS FIRST + partition by: _col2 + raw input shape: + window functions: + window function definition + alias: row_number_window_0 + name: row_number + window function: GenericUDAFRowNumberEvaluator + window frame: ROWS PRECEDING(MAX)~FOLLOWING(MAX) + isPivotResult: true + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: (row_number_window_0 = 1) (type: boolean) + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col1 (type: bigint), _col2 (type: string), 1 (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Union 3 + Vertex: Union 3 + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + diff --git a/ql/src/test/results/clientpositive/llap/show_functions.q.out b/ql/src/test/results/clientpositive/llap/show_functions.q.out index 3eab5bdb7ccc..24105d060102 100644 --- a/ql/src/test/results/clientpositive/llap/show_functions.q.out +++ b/ql/src/test/results/clientpositive/llap/show_functions.q.out @@ -53,6 +53,7 @@ array_intersect array_join array_max array_min +array_position array_remove array_slice array_union @@ -83,7 +84,6 @@ collect_list collect_set compute_bit_vector_fm compute_bit_vector_hll -compute_stats concat concat_ws context_ngrams @@ -454,6 +454,9 @@ to_date to_epoch_milli to_unix_timestamp to_utc_timestamp +toarray +tomap +tostruct translate trim trunc @@ -530,7 +533,6 @@ collect_list collect_set compute_bit_vector_fm compute_bit_vector_hll -compute_stats concat concat_ws context_ngrams @@ -680,6 +682,7 @@ array_intersect array_join array_max array_min +array_position array_remove array_slice array_union @@ -710,7 +713,6 @@ collect_list collect_set compute_bit_vector_fm compute_bit_vector_hll -compute_stats concat concat_ws context_ngrams @@ -1081,6 +1083,9 @@ to_date to_epoch_milli to_unix_timestamp to_utc_timestamp +toarray +tomap +tostruct translate trim trunc diff --git a/ql/src/test/results/clientpositive/llap/subquery_complex_correlation_predicates.q.out b/ql/src/test/results/clientpositive/llap/subquery_complex_correlation_predicates.q.out index fa9768cc5de4..95055833efd6 100644 --- a/ql/src/test/results/clientpositive/llap/subquery_complex_correlation_predicates.q.out +++ b/ql/src/test/results/clientpositive/llap/subquery_complex_correlation_predicates.q.out @@ -290,3 +290,69 @@ POSTHOOK: Input: default@author POSTHOOK: Input: default@book #### A masked pattern was here #### Men Without Women +PREHOOK: query: drop table if exists store_sales +PREHOOK: type: DROPTABLE +PREHOOK: Output: database:default +POSTHOOK: query: drop table if exists store_sales +POSTHOOK: type: DROPTABLE +POSTHOOK: Output: database:default +PREHOOK: query: create table store_sales (promo_sk int, sales_price int, list_price int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@store_sales +POSTHOOK: query: create table store_sales (promo_sk int, sales_price int, list_price int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@store_sales +PREHOOK: query: insert into store_sales values (1, 20, 15), (1, 15, 20), (1, 10, 15) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: default@store_sales +POSTHOOK: query: insert into store_sales values (1, 20, 15), (1, 15, 20), (1, 10, 15) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: default@store_sales +POSTHOOK: Lineage: store_sales.list_price SCRIPT [] +POSTHOOK: Lineage: store_sales.promo_sk SCRIPT [] +POSTHOOK: Lineage: store_sales.sales_price SCRIPT [] +PREHOOK: query: explain cbo +select * from store_sales A where exists( +select 1 from store_sales B + where A.promo_sk = B.promo_sk and A.sales_price > B.list_price and A.sales_price < B.sales_price) +PREHOOK: type: QUERY +PREHOOK: Input: default@store_sales +#### A masked pattern was here #### +POSTHOOK: query: explain cbo +select * from store_sales A where exists( +select 1 from store_sales B + where A.promo_sk = B.promo_sk and A.sales_price > B.list_price and A.sales_price 
< B.sales_price) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@store_sales +#### A masked pattern was here #### +CBO PLAN: +HiveSemiJoin(condition=[AND(=($3, $0), =($4, $1))], joinType=[semi]) + HiveProject(promo_sk=[$0], sales_price=[$1], list_price=[$2]) + HiveFilter(condition=[AND(IS NOT NULL($0), IS NOT NULL($1))]) + HiveTableScan(table=[[default, store_sales]], table:alias=[a]) + HiveProject(promo_sk0=[$3], sales_price0=[$4]) + HiveJoin(condition=[AND(=($3, $0), >($4, $2), <($4, $1))], joinType=[inner], algorithm=[none], cost=[not available]) + HiveProject(promo_sk=[$0], sales_price=[$1], list_price=[$2]) + HiveFilter(condition=[AND(IS NOT NULL($0), IS NOT NULL($2), IS NOT NULL($1))]) + HiveTableScan(table=[[default, store_sales]], table:alias=[b]) + HiveProject(promo_sk=[$0], sales_price=[$1]) + HiveAggregate(group=[{0, 1}]) + HiveFilter(condition=[AND(IS NOT NULL($0), IS NOT NULL($1))]) + HiveTableScan(table=[[default, store_sales]], table:alias=[a]) + +PREHOOK: query: select * from store_sales A where exists( +select 1 from store_sales B + where A.promo_sk = B.promo_sk and A.sales_price > B.list_price and A.sales_price < B.sales_price) +PREHOOK: type: QUERY +PREHOOK: Input: default@store_sales +#### A masked pattern was here #### +POSTHOOK: query: select * from store_sales A where exists( +select 1 from store_sales B + where A.promo_sk = B.promo_sk and A.sales_price > B.list_price and A.sales_price < B.sales_price) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@store_sales +#### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/llap/subquery_join.q.out b/ql/src/test/results/clientpositive/llap/subquery_join.q.out new file mode 100644 index 000000000000..737a2cab3951 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/subquery_join.q.out @@ -0,0 +1,115 @@ +PREHOOK: query: create table t1 (id int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t1 +POSTHOOK: query: create table t1 (id int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t1 +PREHOOK: query: create table t2 (id int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@t2 +POSTHOOK: query: create table t2 (id int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@t2 +Warning: Shuffle Join MERGEJOIN[38][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product +PREHOOK: query: explain cbo select id, + (select count(*) from t1 join t2 on t1.id=t2.id) + from t2 +order by id +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: explain cbo select id, + (select count(*) from t1 join t2 on t1.id=t2.id) + from t2 +order by id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +CBO PLAN: +HiveSortLimit(sort0=[$0], dir0=[ASC]) + HiveProject(id=[$0], $f0=[$1]) + HiveJoin(condition=[true], joinType=[left], algorithm=[none], cost=[not available]) + HiveProject(id=[$0]) + HiveTableScan(table=[[default, t2]], table:alias=[t2]) + HiveProject($f0=[$0]) + HiveAggregate(group=[{}], agg#0=[count()]) + HiveJoin(condition=[=($0, $1)], joinType=[inner], algorithm=[none], cost=[not available]) + HiveProject(id=[$0]) + HiveFilter(condition=[IS NOT NULL($0)]) + HiveTableScan(table=[[default, t1]], table:alias=[t1]) + HiveProject(id=[$0]) + HiveFilter(condition=[IS NOT NULL($0)]) + 
HiveTableScan(table=[[default, t2]], table:alias=[t2]) + +Warning: Shuffle Join MERGEJOIN[38][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product +PREHOOK: query: explain cbo select id, + (select count(*) from t1 join t2 using (id)) + from t2 +order by id +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: explain cbo select id, + (select count(*) from t1 join t2 using (id)) + from t2 +order by id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +CBO PLAN: +HiveSortLimit(sort0=[$0], dir0=[ASC]) + HiveProject(id=[$0], $f0=[$1]) + HiveJoin(condition=[true], joinType=[left], algorithm=[none], cost=[not available]) + HiveProject(id=[$0]) + HiveTableScan(table=[[default, t2]], table:alias=[t2]) + HiveProject($f0=[$0]) + HiveAggregate(group=[{}], agg#0=[count()]) + HiveJoin(condition=[=($0, $1)], joinType=[inner], algorithm=[none], cost=[not available]) + HiveProject(id=[$0]) + HiveFilter(condition=[IS NOT NULL($0)]) + HiveTableScan(table=[[default, t1]], table:alias=[t1]) + HiveProject(id=[$0]) + HiveFilter(condition=[IS NOT NULL($0)]) + HiveTableScan(table=[[default, t2]], table:alias=[t2]) + +Warning: Shuffle Join MERGEJOIN[38][tables = [$hdt$_0, $hdt$_1]] in Stage 'Reducer 2' is a cross product +PREHOOK: query: explain cbo select id, + (select count(*) from t1 join t2 where t1.id=t2.id) + from t2 +order by id +PREHOOK: type: QUERY +PREHOOK: Input: default@t1 +PREHOOK: Input: default@t2 +#### A masked pattern was here #### +POSTHOOK: query: explain cbo select id, + (select count(*) from t1 join t2 where t1.id=t2.id) + from t2 +order by id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@t1 +POSTHOOK: Input: default@t2 +#### A masked pattern was here #### +CBO PLAN: +HiveSortLimit(sort0=[$0], dir0=[ASC]) + HiveProject(id=[$0], $f0=[$1]) + HiveJoin(condition=[true], joinType=[left], algorithm=[none], cost=[not available]) + HiveProject(id=[$0]) + HiveTableScan(table=[[default, t2]], table:alias=[t2]) + HiveProject($f0=[$0]) + HiveAggregate(group=[{}], agg#0=[count()]) + HiveJoin(condition=[=($0, $1)], joinType=[inner], algorithm=[none], cost=[not available]) + HiveProject(id=[$0]) + HiveFilter(condition=[IS NOT NULL($0)]) + HiveTableScan(table=[[default, t1]], table:alias=[t1]) + HiveProject(id=[$0]) + HiveFilter(condition=[IS NOT NULL($0)]) + HiveTableScan(table=[[default, t2]], table:alias=[t2]) + diff --git a/ql/src/test/results/clientpositive/llap/sysdb.q.out b/ql/src/test/results/clientpositive/llap/sysdb.q.out index c2cdcbe4f93f..368544c35bf8 100644 --- a/ql/src/test/results/clientpositive/llap/sysdb.q.out +++ b/ql/src/test/results/clientpositive/llap/sysdb.q.out @@ -1482,7 +1482,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table POSTHOOK: Input: sys@version #### A masked pattern was here #### -4.0.0-beta-2 +4.1.0 PREHOOK: query: select func_name, func_type from funcs order by func_name, func_type limit 5 PREHOOK: type: QUERY PREHOOK: Input: sys@funcs @@ -1640,8 +1640,8 @@ POSTHOOK: Input: sys@compaction_queue POSTHOOK: Input: sys@compactions POSTHOOK: Input: sys@completed_compactions #### A masked pattern was here #### -1 default default scr_txn NULL major initiated NULL NULL NULL #Masked# NULL NULL NULL NULL NULL NULL NULL NULL NULL #Masked# manual 4.0.0-beta-2-SNAPSHOT NULL default NULL NULL -2 default default scr_txn_2 NULL minor initiated NULL NULL NULL #Masked# NULL NULL NULL NULL NULL NULL NULL 
NULL NULL #Masked# manual 4.0.0-beta-2-SNAPSHOT NULL default NULL NULL +1 default default scr_txn NULL major initiated NULL NULL NULL #Masked# NULL NULL NULL NULL NULL NULL NULL NULL NULL #Masked# manual 4.1.0-SNAPSHOT NULL default NULL NULL +2 default default scr_txn_2 NULL minor initiated NULL NULL NULL #Masked# NULL NULL NULL NULL NULL NULL NULL NULL NULL #Masked# manual 4.1.0-SNAPSHOT NULL default NULL NULL PREHOOK: query: use INFORMATION_SCHEMA PREHOOK: type: SWITCHDATABASE PREHOOK: Input: database:information_schema @@ -1876,8 +1876,8 @@ POSTHOOK: Input: sys@dbs POSTHOOK: Input: sys@tbl_privs POSTHOOK: Input: sys@tbls #### A masked pattern was here #### -1 default default scr_txn NULL major initiated NULL NULL NULL #Masked# NULL NULL NULL NULL NULL NULL NULL NULL NULL #Masked# manual 4.0.0-beta-2-SNAPSHOT NULL default NULL NULL -2 default default scr_txn_2 NULL minor initiated NULL NULL NULL #Masked# NULL NULL NULL NULL NULL NULL NULL NULL NULL #Masked# manual 4.0.0-beta-2-SNAPSHOT NULL default NULL NULL +1 default default scr_txn NULL major initiated NULL NULL NULL #Masked# NULL NULL NULL NULL NULL NULL NULL NULL NULL #Masked# manual 4.1.0-SNAPSHOT NULL default NULL NULL +2 default default scr_txn_2 NULL minor initiated NULL NULL NULL #Masked# NULL NULL NULL NULL NULL NULL NULL NULL NULL #Masked# manual 4.1.0-SNAPSHOT NULL default NULL NULL PREHOOK: query: select TXN_ID, STATE, AGENT_INFO, META_INFO, HEARTBEAT_COUNT, TYPE, TC_DATABASE, TC_TABLE, TC_PARTITION, TC_OPERATION_TYPE, TC_WRITEID from TRANSACTIONS PREHOOK: type: QUERY PREHOOK: Input: information_schema@transactions diff --git a/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_4.q.out b/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_4.q.out new file mode 100644 index 000000000000..9c999893817e --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/tez_dynpart_hashjoin_4.q.out @@ -0,0 +1,209 @@ +PREHOOK: query: CREATE TABLE table_a (start_date date, product_id int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@table_a +POSTHOOK: query: CREATE TABLE table_a (start_date date, product_id int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@table_a +PREHOOK: query: ALTER TABLE table_a UPDATE STATISTICS SET('numRows'='200000000','rawDataSize'='0' ) +PREHOOK: type: ALTERTABLE_UPDATETABLESTATS +PREHOOK: Input: default@table_a +PREHOOK: Output: default@table_a +POSTHOOK: query: ALTER TABLE table_a UPDATE STATISTICS SET('numRows'='200000000','rawDataSize'='0' ) +POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS +POSTHOOK: Input: default@table_a +POSTHOOK: Output: default@table_a +PREHOOK: query: ALTER TABLE table_a UPDATE STATISTICS FOR COLUMN product_id SET('lowValue'='1000000','highValue'='100000000','numNulls'='0','numDVs'='300000' ) +PREHOOK: type: ALTERTABLE_UPDATETABLESTATS +PREHOOK: Input: default@table_a +PREHOOK: Output: default@table_a +POSTHOOK: query: ALTER TABLE table_a UPDATE STATISTICS FOR COLUMN product_id SET('lowValue'='1000000','highValue'='100000000','numNulls'='0','numDVs'='300000' ) +POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS +POSTHOOK: Input: default@table_a +POSTHOOK: Output: default@table_a +PREHOOK: query: ALTER TABLE table_a UPDATE STATISTICS FOR COLUMN start_date SET('lowValue'='10000','highValue'='20000','numNulls'='0','numDVs'='2500' ) +PREHOOK: type: ALTERTABLE_UPDATETABLESTATS +PREHOOK: Input: default@table_a +PREHOOK: Output: default@table_a +POSTHOOK: query: ALTER TABLE table_a UPDATE 
STATISTICS FOR COLUMN start_date SET('lowValue'='10000','highValue'='20000','numNulls'='0','numDVs'='2500' ) +POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS +POSTHOOK: Input: default@table_a +POSTHOOK: Output: default@table_a +PREHOOK: query: CREATE TABLE table_b (start_date date, product_id int, product_sk string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@table_b +POSTHOOK: query: CREATE TABLE table_b (start_date date, product_id int, product_sk string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@table_b +PREHOOK: query: ALTER TABLE table_b UPDATE STATISTICS SET('numRows'='100000000','rawDataSize'='0' ) +PREHOOK: type: ALTERTABLE_UPDATETABLESTATS +PREHOOK: Input: default@table_b +PREHOOK: Output: default@table_b +POSTHOOK: query: ALTER TABLE table_b UPDATE STATISTICS SET('numRows'='100000000','rawDataSize'='0' ) +POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS +POSTHOOK: Input: default@table_b +POSTHOOK: Output: default@table_b +PREHOOK: query: ALTER TABLE table_b UPDATE STATISTICS FOR COLUMN product_id SET('lowValue'='1000000','highValue'='100000000','numNulls'='0','numDVs'='300000' ) +PREHOOK: type: ALTERTABLE_UPDATETABLESTATS +PREHOOK: Input: default@table_b +PREHOOK: Output: default@table_b +POSTHOOK: query: ALTER TABLE table_b UPDATE STATISTICS FOR COLUMN product_id SET('lowValue'='1000000','highValue'='100000000','numNulls'='0','numDVs'='300000' ) +POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS +POSTHOOK: Input: default@table_b +POSTHOOK: Output: default@table_b +PREHOOK: query: ALTER TABLE table_b UPDATE STATISTICS FOR COLUMN start_date SET('lowValue'='10000','highValue'='20000','numNulls'='0','numDVs'='500' ) +PREHOOK: type: ALTERTABLE_UPDATETABLESTATS +PREHOOK: Input: default@table_b +PREHOOK: Output: default@table_b +POSTHOOK: query: ALTER TABLE table_b UPDATE STATISTICS FOR COLUMN start_date SET('lowValue'='10000','highValue'='20000','numNulls'='0','numDVs'='500' ) +POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS +POSTHOOK: Input: default@table_b +POSTHOOK: Output: default@table_b +PREHOOK: query: ALTER TABLE table_b UPDATE STATISTICS FOR COLUMN product_sk SET ('numDVs'='300000','numNulls'='0','avgColLen'='10','maxColLen'='10') +PREHOOK: type: ALTERTABLE_UPDATETABLESTATS +PREHOOK: Input: default@table_b +PREHOOK: Output: default@table_b +POSTHOOK: query: ALTER TABLE table_b UPDATE STATISTICS FOR COLUMN product_sk SET ('numDVs'='300000','numNulls'='0','avgColLen'='10','maxColLen'='10') +POSTHOOK: type: ALTERTABLE_UPDATETABLESTATS +POSTHOOK: Input: default@table_b +POSTHOOK: Output: default@table_b +Warning: Map Join MAPJOIN[31][bigTable=?] 
in task 'Map 1' is a cross product +PREHOOK: query: EXPLAIN +SELECT TC.CONST_DATE, TB.PRODUCT_SK +FROM TABLE_A TA +INNER JOIN (SELECT TO_DATE(FROM_UNIXTIME(1701088643)) AS CONST_DATE) TC + ON TA.START_DATE = TC.CONST_DATE +INNER JOIN TABLE_B TB + ON TB.START_DATE = TC.CONST_DATE AND TA.PRODUCT_ID = TB.PRODUCT_ID +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Input: default@table_a +PREHOOK: Input: default@table_b +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN +SELECT TC.CONST_DATE, TB.PRODUCT_SK +FROM TABLE_A TA +INNER JOIN (SELECT TO_DATE(FROM_UNIXTIME(1701088643)) AS CONST_DATE) TC + ON TA.START_DATE = TC.CONST_DATE +INNER JOIN TABLE_B TB + ON TB.START_DATE = TC.CONST_DATE AND TA.PRODUCT_ID = TB.PRODUCT_ID +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Input: default@table_a +POSTHOOK: Input: default@table_b +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Tez +#### A masked pattern was here #### + Edges: + Map 1 <- Map 2 (BROADCAST_EDGE) + Reducer 4 <- Map 1 (CUSTOM_SIMPLE_EDGE), Map 3 (CUSTOM_SIMPLE_EDGE) +#### A masked pattern was here #### + Vertices: + Map 1 + Map Operator Tree: + TableScan + alias: tb + filterExpr: ((start_date = DATE'2023-11-27') and product_id is not null) (type: boolean) + Statistics: Num rows: 100000000 Data size: 15400000000 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: ((start_date = DATE'2023-11-27') and product_id is not null) (type: boolean) + Statistics: Num rows: 50000000 Data size: 7700000000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: product_id (type: int), product_sk (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 50000000 Data size: 4900000000 Basic stats: COMPLETE Column stats: COMPLETE + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 + 1 + outputColumnNames: _col0, _col1 + input vertices: + 1 Map 2 + Statistics: Num rows: 50000000 Data size: 4900000000 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 50000000 Data size: 4900000000 Basic stats: COMPLETE Column stats: COMPLETE + value expressions: _col1 (type: string) + Execution mode: vectorized, llap + LLAP IO: all inputs + Map 2 + Map Operator Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Statistics: Num rows: 1 Data size: 10 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + null sort order: + sort order: + Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: llap + LLAP IO: no inputs + Map 3 + Map Operator Tree: + TableScan + alias: ta + filterExpr: ((start_date = DATE'2023-11-27') and product_id is not null) (type: boolean) + Statistics: Num rows: 200000000 Data size: 12000000000 Basic stats: COMPLETE Column stats: COMPLETE + Filter Operator + predicate: ((start_date = DATE'2023-11-27') and product_id is not null) (type: boolean) + Statistics: Num rows: 100000000 Data size: 6000000000 Basic stats: COMPLETE Column stats: COMPLETE + Select Operator + expressions: product_id (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 100000000 Data 
size: 400000000 Basic stats: COMPLETE Column stats: COMPLETE + Reduce Output Operator + key expressions: _col0 (type: int) + null sort order: z + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 100000000 Data size: 400000000 Basic stats: COMPLETE Column stats: COMPLETE + Execution mode: vectorized, llap + LLAP IO: all inputs + Reducer 4 + Execution mode: vectorized, llap + Reduce Operator Tree: + Map Join Operator + condition map: + Inner Join 0 to 1 + keys: + 0 KEY.reducesinkkey0 (type: int) + 1 KEY.reducesinkkey0 (type: int) + outputColumnNames: _col1 + input vertices: + 0 Map 1 + Statistics: Num rows: 16666666666 Data size: 1566666666604 Basic stats: COMPLETE Column stats: COMPLETE + DynamicPartitionHashJoin: true + Select Operator + expressions: DATE'2023-11-27' (type: date), _col1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 16666666666 Data size: 2499999999900 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + Statistics: Num rows: 16666666666 Data size: 2499999999900 Basic stats: COMPLETE Column stats: COMPLETE + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + diff --git a/ql/src/test/results/clientpositive/llap/udf_array_position.q.out b/ql/src/test/results/clientpositive/llap/udf_array_position.q.out new file mode 100644 index 000000000000..ed185418b708 --- /dev/null +++ b/ql/src/test/results/clientpositive/llap/udf_array_position.q.out @@ -0,0 +1,172 @@ +PREHOOK: query: DESCRIBE FUNCTION array_position +PREHOOK: type: DESCFUNCTION +POSTHOOK: query: DESCRIBE FUNCTION array_position +POSTHOOK: type: DESCFUNCTION +array_position(array, element) - Returns the position of the first occurrence of element in array. Array indexing starts at 1. If the element value is NULL, a NULL is returned. +PREHOOK: query: DESCRIBE FUNCTION EXTENDED array_position +PREHOOK: type: DESCFUNCTION +POSTHOOK: query: DESCRIBE FUNCTION EXTENDED array_position +POSTHOOK: type: DESCFUNCTION +array_position(array, element) - Returns the position of the first occurrence of element in array. Array indexing starts at 1. If the element value is NULL, a NULL is returned. 
+Example: + > SELECT array_position(array(1, 2, 3,4,2), 2) FROM src; + 2 +Function class:org.apache.hadoop.hive.ql.udf.generic.GenericUDFArrayPosition +Function type:BUILTIN +PREHOOK: query: SELECT array_position(array(1, 2, 3, null,3,4), 3) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: SELECT array_position(array(1, 2, 3, null,3,4), 3) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +3 +PREHOOK: query: SELECT array_position(array(1.12, 2.23, 3.34, null,1.11,1.12,2.9),1.12) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: SELECT array_position(array(1.12, 2.23, 3.34, null,1.11,1.12,2.9),1.12) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +1 +PREHOOK: query: SELECT array(1,2,3),array_position(array(1, 2, 3),3) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: SELECT array(1,2,3),array_position(array(1, 2, 3),3) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +[1,2,3] 3 +PREHOOK: query: SELECT array(1,2,3),array_position(array(1, 2, 3),5) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: SELECT array(1,2,3),array_position(array(1, 2, 3),5) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +[1,2,3] 0 +PREHOOK: query: SELECT array_position(array(1, 2, 3), CAST(null AS int)) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: SELECT array_position(array(1, 2, 3), CAST(null AS int)) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +NULL +PREHOOK: query: SELECT array_position(array(1.1234567890, 2.234567890, 3.34567890, null, 3.3456789, 2.234567,1.1234567890),1.1234567890) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: SELECT array_position(array(1.1234567890, 2.234567890, 3.34567890, null, 3.3456789, 2.234567,1.1234567890),1.1234567890) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +1 +PREHOOK: query: SELECT array_position(array(11234567890, 2234567890, 334567890, null, 11234567890, 2234567890, 334567890, null),11234567890) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: SELECT array_position(array(11234567890, 2234567890, 334567890, null, 11234567890, 2234567890, 334567890, null),11234567890) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +1 +PREHOOK: query: SELECT array_position(array(array("a","b","c","d"),array("a","b","c","d"),array("a","b","c","d","e"),null,array("e","a","b","c","d")),array("a","b","c","d")) +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: SELECT array_position(array(array("a","b","c","d"),array("a","b","c","d"),array("a","b","c","d","e"),null,array("e","a","b","c","d")),array("a","b","c","d")) +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked 
pattern was here #### +1 +PREHOOK: query: SELECT array_position(array("aa","bb","cc"),"cc") +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: SELECT array_position(array("aa","bb","cc"),"cc") +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +3 +PREHOOK: query: create table test as select array('a', 'b', 'c', 'b') as a union all select array('a', 'c', 'd') as a +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: _dummy_database@_dummy_table +PREHOOK: Output: database:default +PREHOOK: Output: default@test +POSTHOOK: query: create table test as select array('a', 'b', 'c', 'b') as a union all select array('a', 'c', 'd') as a +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: _dummy_database@_dummy_table +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test +POSTHOOK: Lineage: test.a SCRIPT [] +PREHOOK: query: select * from test +PREHOOK: type: QUERY +PREHOOK: Input: default@test +#### A masked pattern was here #### +POSTHOOK: query: select * from test +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test +#### A masked pattern was here #### +["a","b","c","b"] +["a","c","d"] +PREHOOK: query: select a, array_position(a, 'b') from test +PREHOOK: type: QUERY +PREHOOK: Input: default@test +#### A masked pattern was here #### +POSTHOOK: query: select a, array_position(a, 'b') from test +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test +#### A masked pattern was here #### +["a","b","c","b"] 2 +["a","c","d"] 0 +PREHOOK: query: create external table test_null_array (id string, value Array) ROW FORMAT DELIMITED +#### A masked pattern was here #### +PREHOOK: type: CREATETABLE +#### A masked pattern was here #### +PREHOOK: Output: database:default +PREHOOK: Output: default@test_null_array +POSTHOOK: query: create external table test_null_array (id string, value Array) ROW FORMAT DELIMITED +#### A masked pattern was here #### +POSTHOOK: type: CREATETABLE +#### A masked pattern was here #### +POSTHOOK: Output: database:default +POSTHOOK: Output: default@test_null_array +PREHOOK: query: select id,value from test_null_array +PREHOOK: type: QUERY +PREHOOK: Input: default@test_null_array +#### A masked pattern was here #### +POSTHOOK: query: select id,value from test_null_array +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_null_array +#### A masked pattern was here #### +1 [] +2 ["NULL"] +3 ["null","null"] +PREHOOK: query: select id,array_position(value,id) from test_null_array +PREHOOK: type: QUERY +PREHOOK: Input: default@test_null_array +#### A masked pattern was here #### +POSTHOOK: query: select id,array_position(value,id) from test_null_array +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_null_array +#### A masked pattern was here #### +1 0 +2 0 +3 0 +PREHOOK: query: select value, array_position(value,id) from test_null_array +PREHOOK: type: QUERY +PREHOOK: Input: default@test_null_array +#### A masked pattern was here #### +POSTHOOK: query: select value, array_position(value,id) from test_null_array +POSTHOOK: type: QUERY +POSTHOOK: Input: default@test_null_array +#### A masked pattern was here #### +["NULL"] 0 +["null","null"] 0 +[] 0 diff --git a/ql/src/test/results/clientpositive/llap/udtf_stack.q.out b/ql/src/test/results/clientpositive/llap/udtf_stack.q.out index 2e5bba2e388c..ad176cea611c 100644 --- a/ql/src/test/results/clientpositive/llap/udtf_stack.q.out +++ b/ql/src/test/results/clientpositive/llap/udtf_stack.q.out @@ 
-147,3 +147,42 @@ POSTHOOK: type: QUERY POSTHOOK: Input: _dummy_database@_dummy_table #### A masked pattern was here #### en dbpedia NULL +PREHOOK: query: EXPLAIN SELECT STACK(2, 'a', 'b', 'c', 'd', 'e') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: EXPLAIN SELECT STACK(2, 'a', 'b', 'c', 'd', 'e') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: _dummy_table + Row Limit Per Split: 1 + Select Operator + expressions: 2 (type: int), 'a' (type: string), 'b' (type: string), 'c' (type: string), 'd' (type: string), 'e' (type: string) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 + UDTF Operator + function name: stack + Select Operator + expressions: col0 (type: string), col1 (type: string), col2 (type: string) + outputColumnNames: _col0, _col1, _col2 + ListSink + +PREHOOK: query: SELECT STACK(2, 'a', 'b', 'c', 'd', 'e') +PREHOOK: type: QUERY +PREHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +POSTHOOK: query: SELECT STACK(2, 'a', 'b', 'c', 'd', 'e') +POSTHOOK: type: QUERY +POSTHOOK: Input: _dummy_database@_dummy_table +#### A masked pattern was here #### +a b c +d e NULL diff --git a/serde/pom.xml b/serde/pom.xml index 4d391bfcf56f..b6c0f53edace 100644 --- a/serde/pom.xml +++ b/serde/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-serde diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/DefaultFetchFormatter.java b/serde/src/java/org/apache/hadoop/hive/serde2/DefaultFetchFormatter.java index 1512a0eea0e6..bd0e9f5ed27a 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/DefaultFetchFormatter.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/DefaultFetchFormatter.java @@ -43,7 +43,7 @@ public void initialize(Configuration hconf, Properties props) throws SerDeExcept } private AbstractSerDe initializeSerde(Configuration conf, Properties props) throws SerDeException { - String serdeName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEFETCHOUTPUTSERDE); + String serdeName = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_FETCH_OUTPUT_SERDE); Class serdeClass; try { serdeClass = diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleDeserializeRead.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleDeserializeRead.java index 34e5a4a9bafe..12fdfd66304d 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleDeserializeRead.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazy/fast/LazySimpleDeserializeRead.java @@ -875,15 +875,11 @@ private boolean doReadField(Field field) { case STRUCT: case UNION: { - if (currentLevel > 0) { - - // Check for Map which occupies 2 levels (key separator and key/value pair separator). - if (currentComplexTypeHelpers[currentLevel - 1] == null) { - Preconditions.checkState(currentLevel > 1); - Preconditions.checkState( - currentComplexTypeHelpers[currentLevel - 2] instanceof MapComplexTypeHelper); - currentLevel++; - } + // Check for Map which occupies 2 levels (key separator and key/value pair separator). 
+ if (currentLevel > 0 + && currentComplexTypeHelpers[currentLevel] == null + && currentComplexTypeHelpers[currentLevel - 1] instanceof MapComplexTypeHelper) { + currentLevel++; } ComplexTypeHelper complexTypeHelper = field.complexTypeHelper; currentComplexTypeHelpers[currentLevel++] = complexTypeHelper; diff --git a/service-rpc/pom.xml b/service-rpc/pom.xml index c6d66459cee6..bb851977614d 100644 --- a/service-rpc/pom.xml +++ b/service-rpc/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-service-rpc diff --git a/service/pom.xml b/service/pom.xml index 9d0a9d2f3f47..dfd9ce397c57 100644 --- a/service/pom.xml +++ b/service/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-service @@ -185,10 +185,16 @@ org.pac4j pac4j-saml-opensamlv3 + + + org.bouncycastle + bcprov-jdk15on + + org.bouncycastle - bcprov-jdk15on + bcprov-jdk18on org.apache.santuario diff --git a/service/src/java/org/apache/hive/service/cli/CLIService.java b/service/src/java/org/apache/hive/service/cli/CLIService.java index 151b2067576f..a061536f857d 100644 --- a/service/src/java/org/apache/hive/service/cli/CLIService.java +++ b/service/src/java/org/apache/hive/service/cli/CLIService.java @@ -510,6 +510,10 @@ public OperationStatus getOperationStatus(OperationHandle opHandle, boolean getP opStatus.setJobProgressUpdate(progressUpdateLog(getProgressUpdate, operation, conf)); return opStatus; } + public HiveConf getHiveSessionConf(OperationHandle opHandle) throws HiveSQLException { + Operation operation = sessionManager.getOperationManager().getOperation(opHandle); + return operation.getParentSession().getHiveConf(); + } public HiveConf getSessionConf(SessionHandle sessionHandle) throws HiveSQLException { diff --git a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java index 4aa142f52f35..d7deba7b2746 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java +++ b/service/src/java/org/apache/hive/service/cli/operation/SQLOperation.java @@ -322,7 +322,7 @@ public Object run() throws HiveSQLException { Hive.set(parentHive); // TODO: can this result in cross-thread reuse of session state? 
SessionState.setCurrentSessionState(parentSessionState); - PerfLogger.setPerfLogger(SessionState.getPerfLogger()); + SessionState.getPerfLogger(); if (!embedded) { LogUtils.registerLoggingContext(queryState.getConf()); } diff --git a/service/src/java/org/apache/hive/service/cli/operation/hplsql/HplSqlOperation.java b/service/src/java/org/apache/hive/service/cli/operation/hplsql/HplSqlOperation.java index 2bd829fa8061..16d16792aedd 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/hplsql/HplSqlOperation.java +++ b/service/src/java/org/apache/hive/service/cli/operation/hplsql/HplSqlOperation.java @@ -199,7 +199,7 @@ public void run() { assert (!parentHive.allowClose()); Hive.set(parentHive); SessionState.setCurrentSessionState(parentSessionState); - PerfLogger.setPerfLogger(SessionState.getPerfLogger()); + SessionState.getPerfLogger(); LogUtils.registerLoggingContext(queryState.getConf()); ShimLoader.getHadoopShims() .setHadoopQueryContext(String.format(USER_ID, queryState.getQueryId(), parentSessionState.getUserName())); diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java index c14a041ba013..e4151a617cdd 100644 --- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java +++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java @@ -139,7 +139,7 @@ public HiveSessionImpl(SessionHandle sessionHandle, TProtocolVersion protocol, this.operationLock = serverConf.getBoolVar( ConfVars.HIVE_SERVER2_PARALLEL_OPS_IN_SESSION) ? null : new Semaphore(1); // Set an explicit session name to control the download directory name - sessionConf.set(ConfVars.HIVESESSIONID.varname, + sessionConf.set(ConfVars.HIVE_SESSION_ID.varname, this.sessionHandle.getHandleIdentifier().toString()); // Use thrift transportable formatter sessionConf.set(SerDeUtils.LIST_SINK_OUTPUT_FORMATTER, ThriftFormatter.class.getName()); @@ -468,7 +468,7 @@ public String getPassword() { @Override public HiveConf getHiveConf() { - sessionConf.setVar(HiveConf.ConfVars.HIVEFETCHOUTPUTSERDE, FETCH_WORK_SERDE_CLASS); + sessionConf.setVar(HiveConf.ConfVars.HIVE_FETCH_OUTPUT_SERDE, FETCH_WORK_SERDE_CLASS); return sessionConf; } diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java index d93d54b33b1e..7d71881dc844 100644 --- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java +++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java @@ -794,7 +794,7 @@ public TGetOperationStatusResp GetOperationStatus(TGetOperationStatusReq req) th try { OperationStatus operationStatus = cliService.getOperationStatus(operationHandle, req.isGetProgressUpdate()); - + HiveConf sessionConf = cliService.getHiveSessionConf(operationHandle); if (operationStatus.getState().equals(OperationState.FINISHED)) { long numModifiedRows = operationStatus.getNumModifiedRows(); resp.setNumModifiedRows(numModifiedRows); @@ -810,7 +810,7 @@ public TGetOperationStatusResp GetOperationStatus(TGetOperationStatusReq req) th } JobProgressUpdate progressUpdate = operationStatus.jobProgressUpdate(); ProgressMonitorStatusMapper mapper = ProgressMonitorStatusMapper.DEFAULT; - if ("tez".equals(hiveConf.getVar(ConfVars.HIVE_EXECUTION_ENGINE))) { + if ("tez".equals(sessionConf.getVar(ConfVars.HIVE_EXECUTION_ENGINE))) { mapper = new TezProgressMonitorStatusMapper(); } 
TJobExecutionStatus executionStatus = diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java index bc20ea7797eb..63d23c11d38a 100644 --- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java +++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpCLIService.java @@ -160,7 +160,7 @@ public void onClosed(Connection connection) { if (keyStoreAlgorithm.isEmpty()) { keyStoreAlgorithm = KeyManagerFactory.getDefaultAlgorithm(); } - SslContextFactory sslContextFactory = new SslContextFactory(); + SslContextFactory sslContextFactory = new SslContextFactory.Server(); String[] excludedProtocols = hiveConf.getVar(ConfVars.HIVE_SSL_PROTOCOL_BLACKLIST).split(","); LOG.info("HTTP Server SSL: adding excluded protocols: " + Arrays.toString(excludedProtocols)); sslContextFactory.addExcludeProtocols(excludedProtocols); diff --git a/service/src/java/org/apache/hive/service/server/HiveServer2.java b/service/src/java/org/apache/hive/service/server/HiveServer2.java index e8eaab550aef..e1650e86f6be 100644 --- a/service/src/java/org/apache/hive/service/server/HiveServer2.java +++ b/service/src/java/org/apache/hive/service/server/HiveServer2.java @@ -52,7 +52,6 @@ import org.apache.curator.framework.api.CuratorEventType; import org.apache.curator.framework.recipes.leader.LeaderLatch; import org.apache.curator.framework.recipes.leader.LeaderLatchListener; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.JvmPauseMonitor; import org.apache.hadoop.hive.common.LogUtils; import org.apache.hadoop.hive.common.LogUtils.LogInitializationException; @@ -106,7 +105,6 @@ import org.apache.hive.service.ServiceException; import org.apache.hive.service.auth.AuthType; import org.apache.hive.service.auth.saml.HiveSaml2Client; -import org.apache.hive.service.auth.saml.HiveSamlUtils; import org.apache.hive.service.cli.CLIService; import org.apache.hive.service.cli.HiveSQLException; import org.apache.hive.service.cli.session.HiveSession; @@ -1044,7 +1042,7 @@ public static void scheduleClearDanglingScratchDir(HiveConf hiveConf, int initia .daemon(true) .build()); executor.scheduleAtFixedRate(new ClearDanglingScratchDir(false, false, false, - HiveConf.getVar(hiveConf, HiveConf.ConfVars.SCRATCHDIR), hiveConf), initialWaitInSec, + HiveConf.getVar(hiveConf, HiveConf.ConfVars.SCRATCH_DIR), hiveConf), initialWaitInSec, HiveConf.getTimeVar(hiveConf, ConfVars.HIVE_SERVER2_CLEAR_DANGLING_SCRATCH_DIR_INTERVAL, TimeUnit.SECONDS), TimeUnit.SECONDS); } diff --git a/service/src/test/org/apache/hive/service/cli/session/TestSessionCleanup.java b/service/src/test/org/apache/hive/service/cli/session/TestSessionCleanup.java index c19d97abe8f4..e1fbdfcdca3a 100644 --- a/service/src/test/org/apache/hive/service/cli/session/TestSessionCleanup.java +++ b/service/src/test/org/apache/hive/service/cli/session/TestSessionCleanup.java @@ -105,7 +105,7 @@ public void testTempSessionFileCleanup() throws Exception { private String[] getPipeoutFiles(HiveConf hiveConf) { File localScratchDir = new File( - hiveConf.getVar(HiveConf.ConfVars.LOCALSCRATCHDIR)); + hiveConf.getVar(HiveConf.ConfVars.LOCAL_SCRATCH_DIR)); String[] pipeoutFiles = localScratchDir.list(new FilenameFilter() { @Override public boolean accept(File dir, String name) { diff --git a/service/src/test/org/apache/hive/service/cli/session/TestSessionManagerMetrics.java 
b/service/src/test/org/apache/hive/service/cli/session/TestSessionManagerMetrics.java index a3356658c12e..7f31dce576fb 100644 --- a/service/src/test/org/apache/hive/service/cli/session/TestSessionManagerMetrics.java +++ b/service/src/test/org/apache/hive/service/cli/session/TestSessionManagerMetrics.java @@ -26,7 +26,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import com.fasterxml.jackson.databind.JsonNode; import org.apache.hadoop.hive.common.metrics.MetricsTestUtils; import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; import org.apache.hadoop.hive.common.metrics.common.MetricsFactory; @@ -44,7 +43,6 @@ import org.apache.hive.service.cli.operation.MetadataOperation; import org.apache.hive.service.cli.operation.OperationManager; import org.apache.hive.service.rpc.thrift.TProtocolVersion; -import org.apache.hive.service.server.HiveServer2; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -76,7 +74,7 @@ public void setup() throws Exception { conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_METRICS_ENABLED, true); conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false); conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, MetricsReporting.JSON_FILE.name() + "," + MetricsReporting.JMX.name()); - conf.setBoolVar(HiveConf.ConfVars.HIVEOPTIMIZEMETADATAQUERIES, false); + conf.setBoolVar(HiveConf.ConfVars.HIVE_OPTIMIZE_METADATA_QUERIES, false); //NOTES: If we enable operation log, SessionManager will delete operation logs directory on exit, //it maybe impact TestSessionCleanup, because they use the same location ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION, // when we run testing in parallel on local machine with -DforkCount=x, it happen. diff --git a/service/src/test/org/apache/hive/service/server/TestHS2HttpServer.java b/service/src/test/org/apache/hive/service/server/TestHS2HttpServer.java index 6c50e8170901..a1e30d3bb5e9 100644 --- a/service/src/test/org/apache/hive/service/server/TestHS2HttpServer.java +++ b/service/src/test/org/apache/hive/service/server/TestHS2HttpServer.java @@ -76,7 +76,7 @@ public static void beforeTests() throws Exception { Integer.valueOf(ConfVars.HIVE_SERVER2_WEBUI_PORT.getDefaultValue())); apiBaseURL = "http://localhost:" + webUIPort + "/api/v1"; hiveConf = new HiveConf(); - hiveConf.set(ConfVars.METASTOREPWD.varname, metastorePasswd); + hiveConf.set(ConfVars.METASTORE_PWD.varname, metastorePasswd); hiveConf.set(ConfVars.HIVE_SERVER2_WEBUI_PORT.varname, webUIPort.toString()); hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); @@ -298,7 +298,7 @@ public void testConfStrippedFromWebUI() throws Exception { if (line.contains(metastorePasswd)) { pwdValFound = line; } - if (line.contains(ConfVars.METASTOREPWD.varname)) { + if (line.contains(ConfVars.METASTORE_PWD.varname)) { pwdKeyFound = line; } } diff --git a/service/src/test/org/apache/hive/service/server/TestHS2HttpServerPam.java b/service/src/test/org/apache/hive/service/server/TestHS2HttpServerPam.java index 04f66b4e5d73..92706af7033a 100644 --- a/service/src/test/org/apache/hive/service/server/TestHS2HttpServerPam.java +++ b/service/src/test/org/apache/hive/service/server/TestHS2HttpServerPam.java @@ -35,19 +35,8 @@ import org.junit.Test; import javax.security.sasl.AuthenticationException; -import java.io.BufferedInputStream; -import java.io.File; -import java.io.FileInputStream; -import 
java.io.FileOutputStream; -import java.io.IOException; import java.net.HttpURLConnection; import java.net.URL; -import java.security.KeyStore; -import java.security.KeyStoreException; -import java.security.NoSuchAlgorithmException; -import java.security.cert.Certificate; -import java.security.cert.CertificateException; -import java.security.cert.CertificateFactory; import java.util.HashMap; import java.util.Map; @@ -68,7 +57,7 @@ public static void beforeTests() throws Exception { MetaStoreTestUtils.findFreePortExcepting(Integer.valueOf(ConfVars.HIVE_SERVER2_WEBUI_PORT.getDefaultValue())); hiveConf = new HiveConf(); hiveConf.setBoolVar(ConfVars.HIVE_IN_TEST, true); - hiveConf.set(ConfVars.METASTOREPWD.varname, metastorePasswd); + hiveConf.set(ConfVars.METASTORE_PWD.varname, metastorePasswd); hiveConf.set(ConfVars.HIVE_SERVER2_WEBUI_PORT.varname, webUIPort.toString()); hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/service/src/test/org/apache/hive/service/server/TestHS2HttpServerPamConfiguration.java b/service/src/test/org/apache/hive/service/server/TestHS2HttpServerPamConfiguration.java index 3e2ad22bc15a..cf1fc236ce86 100644 --- a/service/src/test/org/apache/hive/service/server/TestHS2HttpServerPamConfiguration.java +++ b/service/src/test/org/apache/hive/service/server/TestHS2HttpServerPamConfiguration.java @@ -66,7 +66,7 @@ public static void beforeTests() throws Exception { hiveConf = new HiveConf(); hiveConf.setBoolVar(ConfVars.HIVE_SERVER2_WEBUI_USE_PAM, true); hiveConf.setBoolVar(ConfVars.HIVE_IN_TEST, false); - hiveConf.set(ConfVars.METASTOREPWD.varname, metastorePasswd); + hiveConf.set(ConfVars.METASTORE_PWD.varname, metastorePasswd); hiveConf.set(ConfVars.HIVE_SERVER2_WEBUI_PORT.varname, webUIPort.toString()); hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/shims/0.23/pom.xml b/shims/0.23/pom.xml index 54602203fb39..b5dd4049817b 100644 --- a/shims/0.23/pom.xml +++ b/shims/0.23/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../../pom.xml org.apache.hive.shims diff --git a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java index 0437417e4b47..d4d6235d8324 100644 --- a/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java +++ b/shims/0.23/src/main/java/org/apache/hadoop/hive/shims/Hadoop23Shims.java @@ -367,8 +367,7 @@ public int getJobTrackerPort() throws UnsupportedOperationException { @Override public void setupConfiguration(Configuration conf) { conf.setBoolean(TezConfiguration.TEZ_LOCAL_MODE, true); - // TODO: enable below option once HIVE-26445 is investigated - // hiveConf.setBoolean("tez.local.mode.without.network", true); + conf.setBoolean("tez.local.mode.without.network", true); conf.setBoolean(TezRuntimeConfiguration.TEZ_RUNTIME_OPTIMIZE_LOCAL_FETCH, true); conf.setBoolean(TezConfiguration.TEZ_IGNORE_LIB_URIS, true); diff --git a/shims/aggregator/pom.xml b/shims/aggregator/pom.xml index 47758421fa31..8f6f232364ee 100644 --- a/shims/aggregator/pom.xml +++ b/shims/aggregator/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../../pom.xml hive-shims diff --git a/shims/common/pom.xml b/shims/common/pom.xml index 9f7f0df1f4dc..ff5ffbeb98be 100644 
--- a/shims/common/pom.xml +++ b/shims/common/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../../pom.xml org.apache.hive.shims @@ -73,6 +73,14 @@ org.apache.zookeeper zookeeper + + ch.qos.logback + logback-classic + + + ch.qos.logback + logback-core + org.apache.hadoop hadoop-core diff --git a/shims/pom.xml b/shims/pom.xml index 45ce1c5e95a1..be3e1a21baf8 100644 --- a/shims/pom.xml +++ b/shims/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-shims-aggregator diff --git a/standalone-metastore/metastore-common/pom.xml b/standalone-metastore/metastore-common/pom.xml index cc8938f7a9cf..5ac974f16940 100644 --- a/standalone-metastore/metastore-common/pom.xml +++ b/standalone-metastore/metastore-common/pom.xml @@ -16,7 +16,7 @@ hive-standalone-metastore org.apache.hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT 4.0.0 hive-standalone-metastore-common @@ -251,6 +251,14 @@ zookeeper ${zookeeper.version} + + ch.qos.logback + logback-classic + + + ch.qos.logback + logback-core + org.slf4j slf4j-log4j12 diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index 9afe386894e1..5ab6a4c81734 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -2975,14 +2975,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1856; - ::apache::thrift::protocol::TType _etype1859; - xfer += iprot->readListBegin(_etype1859, _size1856); - this->success.resize(_size1856); - uint32_t _i1860; - for (_i1860 = 0; _i1860 < _size1856; ++_i1860) + uint32_t _size1862; + ::apache::thrift::protocol::TType _etype1865; + xfer += iprot->readListBegin(_etype1865, _size1862); + this->success.resize(_size1862); + uint32_t _i1866; + for (_i1866 = 0; _i1866 < _size1862; ++_i1866) { - xfer += iprot->readString(this->success[_i1860]); + xfer += iprot->readString(this->success[_i1866]); } xfer += iprot->readListEnd(); } @@ -3021,10 +3021,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1861; - for (_iter1861 = this->success.begin(); _iter1861 != this->success.end(); ++_iter1861) + std::vector ::const_iterator _iter1867; + for (_iter1867 = this->success.begin(); _iter1867 != this->success.end(); ++_iter1867) { - xfer += oprot->writeString((*_iter1861)); + xfer += oprot->writeString((*_iter1867)); } xfer += oprot->writeListEnd(); } @@ -3069,14 +3069,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1862; - ::apache::thrift::protocol::TType _etype1865; - xfer += iprot->readListBegin(_etype1865, _size1862); - (*(this->success)).resize(_size1862); - uint32_t _i1866; - for (_i1866 = 0; _i1866 < _size1862; ++_i1866) + uint32_t _size1868; + ::apache::thrift::protocol::TType _etype1871; + xfer += iprot->readListBegin(_etype1871, _size1868); + (*(this->success)).resize(_size1868); + 
uint32_t _i1872; + for (_i1872 = 0; _i1872 < _size1868; ++_i1872) { - xfer += iprot->readString((*(this->success))[_i1866]); + xfer += iprot->readString((*(this->success))[_i1872]); } xfer += iprot->readListEnd(); } @@ -3193,14 +3193,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1867; - ::apache::thrift::protocol::TType _etype1870; - xfer += iprot->readListBegin(_etype1870, _size1867); - this->success.resize(_size1867); - uint32_t _i1871; - for (_i1871 = 0; _i1871 < _size1867; ++_i1871) + uint32_t _size1873; + ::apache::thrift::protocol::TType _etype1876; + xfer += iprot->readListBegin(_etype1876, _size1873); + this->success.resize(_size1873); + uint32_t _i1877; + for (_i1877 = 0; _i1877 < _size1873; ++_i1877) { - xfer += iprot->readString(this->success[_i1871]); + xfer += iprot->readString(this->success[_i1877]); } xfer += iprot->readListEnd(); } @@ -3239,10 +3239,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1872; - for (_iter1872 = this->success.begin(); _iter1872 != this->success.end(); ++_iter1872) + std::vector ::const_iterator _iter1878; + for (_iter1878 = this->success.begin(); _iter1878 != this->success.end(); ++_iter1878) { - xfer += oprot->writeString((*_iter1872)); + xfer += oprot->writeString((*_iter1878)); } xfer += oprot->writeListEnd(); } @@ -3287,14 +3287,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1873; - ::apache::thrift::protocol::TType _etype1876; - xfer += iprot->readListBegin(_etype1876, _size1873); - (*(this->success)).resize(_size1873); - uint32_t _i1877; - for (_i1877 = 0; _i1877 < _size1873; ++_i1877) + uint32_t _size1879; + ::apache::thrift::protocol::TType _etype1882; + xfer += iprot->readListBegin(_etype1882, _size1879); + (*(this->success)).resize(_size1879); + uint32_t _i1883; + for (_i1883 = 0; _i1883 < _size1879; ++_i1883) { - xfer += iprot->readString((*(this->success))[_i1877]); + xfer += iprot->readString((*(this->success))[_i1883]); } xfer += iprot->readListEnd(); } @@ -4347,14 +4347,14 @@ uint32_t ThriftHiveMetastore_get_dataconnectors_result::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1878; - ::apache::thrift::protocol::TType _etype1881; - xfer += iprot->readListBegin(_etype1881, _size1878); - this->success.resize(_size1878); - uint32_t _i1882; - for (_i1882 = 0; _i1882 < _size1878; ++_i1882) + uint32_t _size1884; + ::apache::thrift::protocol::TType _etype1887; + xfer += iprot->readListBegin(_etype1887, _size1884); + this->success.resize(_size1884); + uint32_t _i1888; + for (_i1888 = 0; _i1888 < _size1884; ++_i1888) { - xfer += iprot->readString(this->success[_i1882]); + xfer += iprot->readString(this->success[_i1888]); } xfer += iprot->readListEnd(); } @@ -4393,10 +4393,10 @@ uint32_t ThriftHiveMetastore_get_dataconnectors_result::write(::apache::thrift:: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - 
std::vector ::const_iterator _iter1883; - for (_iter1883 = this->success.begin(); _iter1883 != this->success.end(); ++_iter1883) + std::vector ::const_iterator _iter1889; + for (_iter1889 = this->success.begin(); _iter1889 != this->success.end(); ++_iter1889) { - xfer += oprot->writeString((*_iter1883)); + xfer += oprot->writeString((*_iter1889)); } xfer += oprot->writeListEnd(); } @@ -4441,14 +4441,14 @@ uint32_t ThriftHiveMetastore_get_dataconnectors_presult::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1884; - ::apache::thrift::protocol::TType _etype1887; - xfer += iprot->readListBegin(_etype1887, _size1884); - (*(this->success)).resize(_size1884); - uint32_t _i1888; - for (_i1888 = 0; _i1888 < _size1884; ++_i1888) + uint32_t _size1890; + ::apache::thrift::protocol::TType _etype1893; + xfer += iprot->readListBegin(_etype1893, _size1890); + (*(this->success)).resize(_size1890); + uint32_t _i1894; + for (_i1894 = 0; _i1894 < _size1890; ++_i1894) { - xfer += iprot->readString((*(this->success))[_i1888]); + xfer += iprot->readString((*(this->success))[_i1894]); } xfer += iprot->readListEnd(); } @@ -5510,17 +5510,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1889; - ::apache::thrift::protocol::TType _ktype1890; - ::apache::thrift::protocol::TType _vtype1891; - xfer += iprot->readMapBegin(_ktype1890, _vtype1891, _size1889); - uint32_t _i1893; - for (_i1893 = 0; _i1893 < _size1889; ++_i1893) + uint32_t _size1895; + ::apache::thrift::protocol::TType _ktype1896; + ::apache::thrift::protocol::TType _vtype1897; + xfer += iprot->readMapBegin(_ktype1896, _vtype1897, _size1895); + uint32_t _i1899; + for (_i1899 = 0; _i1899 < _size1895; ++_i1899) { - std::string _key1894; - xfer += iprot->readString(_key1894); - Type& _val1895 = this->success[_key1894]; - xfer += _val1895.read(iprot); + std::string _key1900; + xfer += iprot->readString(_key1900); + Type& _val1901 = this->success[_key1900]; + xfer += _val1901.read(iprot); } xfer += iprot->readMapEnd(); } @@ -5559,11 +5559,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter1896; - for (_iter1896 = this->success.begin(); _iter1896 != this->success.end(); ++_iter1896) + std::map ::const_iterator _iter1902; + for (_iter1902 = this->success.begin(); _iter1902 != this->success.end(); ++_iter1902) { - xfer += oprot->writeString(_iter1896->first); - xfer += _iter1896->second.write(oprot); + xfer += oprot->writeString(_iter1902->first); + xfer += _iter1902->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -5608,17 +5608,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1897; - ::apache::thrift::protocol::TType _ktype1898; - ::apache::thrift::protocol::TType _vtype1899; - xfer += iprot->readMapBegin(_ktype1898, _vtype1899, _size1897); - uint32_t _i1901; - for (_i1901 = 0; _i1901 < _size1897; ++_i1901) + uint32_t _size1903; + ::apache::thrift::protocol::TType _ktype1904; + ::apache::thrift::protocol::TType _vtype1905; + xfer += 
iprot->readMapBegin(_ktype1904, _vtype1905, _size1903); + uint32_t _i1907; + for (_i1907 = 0; _i1907 < _size1903; ++_i1907) { - std::string _key1902; - xfer += iprot->readString(_key1902); - Type& _val1903 = (*(this->success))[_key1902]; - xfer += _val1903.read(iprot); + std::string _key1908; + xfer += iprot->readString(_key1908); + Type& _val1909 = (*(this->success))[_key1908]; + xfer += _val1909.read(iprot); } xfer += iprot->readMapEnd(); } @@ -5772,14 +5772,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1904; - ::apache::thrift::protocol::TType _etype1907; - xfer += iprot->readListBegin(_etype1907, _size1904); - this->success.resize(_size1904); - uint32_t _i1908; - for (_i1908 = 0; _i1908 < _size1904; ++_i1908) + uint32_t _size1910; + ::apache::thrift::protocol::TType _etype1913; + xfer += iprot->readListBegin(_etype1913, _size1910); + this->success.resize(_size1910); + uint32_t _i1914; + for (_i1914 = 0; _i1914 < _size1910; ++_i1914) { - xfer += this->success[_i1908].read(iprot); + xfer += this->success[_i1914].read(iprot); } xfer += iprot->readListEnd(); } @@ -5834,10 +5834,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1909; - for (_iter1909 = this->success.begin(); _iter1909 != this->success.end(); ++_iter1909) + std::vector ::const_iterator _iter1915; + for (_iter1915 = this->success.begin(); _iter1915 != this->success.end(); ++_iter1915) { - xfer += (*_iter1909).write(oprot); + xfer += (*_iter1915).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5890,14 +5890,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1910; - ::apache::thrift::protocol::TType _etype1913; - xfer += iprot->readListBegin(_etype1913, _size1910); - (*(this->success)).resize(_size1910); - uint32_t _i1914; - for (_i1914 = 0; _i1914 < _size1910; ++_i1914) + uint32_t _size1916; + ::apache::thrift::protocol::TType _etype1919; + xfer += iprot->readListBegin(_etype1919, _size1916); + (*(this->success)).resize(_size1916); + uint32_t _i1920; + for (_i1920 = 0; _i1920 < _size1916; ++_i1920) { - xfer += (*(this->success))[_i1914].read(iprot); + xfer += (*(this->success))[_i1920].read(iprot); } xfer += iprot->readListEnd(); } @@ -6083,14 +6083,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1915; - ::apache::thrift::protocol::TType _etype1918; - xfer += iprot->readListBegin(_etype1918, _size1915); - this->success.resize(_size1915); - uint32_t _i1919; - for (_i1919 = 0; _i1919 < _size1915; ++_i1919) + uint32_t _size1921; + ::apache::thrift::protocol::TType _etype1924; + xfer += iprot->readListBegin(_etype1924, _size1921); + this->success.resize(_size1921); + uint32_t _i1925; + for (_i1925 = 0; _i1925 < _size1921; ++_i1925) { - xfer += this->success[_i1919].read(iprot); + xfer += this->success[_i1925].read(iprot); } xfer += iprot->readListEnd(); } @@ -6145,10 +6145,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: xfer += 
oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1920; - for (_iter1920 = this->success.begin(); _iter1920 != this->success.end(); ++_iter1920) + std::vector ::const_iterator _iter1926; + for (_iter1926 = this->success.begin(); _iter1926 != this->success.end(); ++_iter1926) { - xfer += (*_iter1920).write(oprot); + xfer += (*_iter1926).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6201,14 +6201,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1921; - ::apache::thrift::protocol::TType _etype1924; - xfer += iprot->readListBegin(_etype1924, _size1921); - (*(this->success)).resize(_size1921); - uint32_t _i1925; - for (_i1925 = 0; _i1925 < _size1921; ++_i1925) + uint32_t _size1927; + ::apache::thrift::protocol::TType _etype1930; + xfer += iprot->readListBegin(_etype1930, _size1927); + (*(this->success)).resize(_size1927); + uint32_t _i1931; + for (_i1931 = 0; _i1931 < _size1927; ++_i1931) { - xfer += (*(this->success))[_i1925].read(iprot); + xfer += (*(this->success))[_i1931].read(iprot); } xfer += iprot->readListEnd(); } @@ -6625,14 +6625,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1926; - ::apache::thrift::protocol::TType _etype1929; - xfer += iprot->readListBegin(_etype1929, _size1926); - this->success.resize(_size1926); - uint32_t _i1930; - for (_i1930 = 0; _i1930 < _size1926; ++_i1930) + uint32_t _size1932; + ::apache::thrift::protocol::TType _etype1935; + xfer += iprot->readListBegin(_etype1935, _size1932); + this->success.resize(_size1932); + uint32_t _i1936; + for (_i1936 = 0; _i1936 < _size1932; ++_i1936) { - xfer += this->success[_i1930].read(iprot); + xfer += this->success[_i1936].read(iprot); } xfer += iprot->readListEnd(); } @@ -6687,10 +6687,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1931; - for (_iter1931 = this->success.begin(); _iter1931 != this->success.end(); ++_iter1931) + std::vector ::const_iterator _iter1937; + for (_iter1937 = this->success.begin(); _iter1937 != this->success.end(); ++_iter1937) { - xfer += (*_iter1931).write(oprot); + xfer += (*_iter1937).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6743,14 +6743,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1932; - ::apache::thrift::protocol::TType _etype1935; - xfer += iprot->readListBegin(_etype1935, _size1932); - (*(this->success)).resize(_size1932); - uint32_t _i1936; - for (_i1936 = 0; _i1936 < _size1932; ++_i1936) + uint32_t _size1938; + ::apache::thrift::protocol::TType _etype1941; + xfer += iprot->readListBegin(_etype1941, _size1938); + (*(this->success)).resize(_size1938); + uint32_t _i1942; + for (_i1942 = 0; _i1942 < _size1938; ++_i1942) { - xfer += (*(this->success))[_i1936].read(iprot); + xfer += (*(this->success))[_i1942].read(iprot); } xfer += 
iprot->readListEnd(); } @@ -6936,14 +6936,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1937; - ::apache::thrift::protocol::TType _etype1940; - xfer += iprot->readListBegin(_etype1940, _size1937); - this->success.resize(_size1937); - uint32_t _i1941; - for (_i1941 = 0; _i1941 < _size1937; ++_i1941) + uint32_t _size1943; + ::apache::thrift::protocol::TType _etype1946; + xfer += iprot->readListBegin(_etype1946, _size1943); + this->success.resize(_size1943); + uint32_t _i1947; + for (_i1947 = 0; _i1947 < _size1943; ++_i1947) { - xfer += this->success[_i1941].read(iprot); + xfer += this->success[_i1947].read(iprot); } xfer += iprot->readListEnd(); } @@ -6998,10 +6998,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1942; - for (_iter1942 = this->success.begin(); _iter1942 != this->success.end(); ++_iter1942) + std::vector ::const_iterator _iter1948; + for (_iter1948 = this->success.begin(); _iter1948 != this->success.end(); ++_iter1948) { - xfer += (*_iter1942).write(oprot); + xfer += (*_iter1948).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7054,14 +7054,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1943; - ::apache::thrift::protocol::TType _etype1946; - xfer += iprot->readListBegin(_etype1946, _size1943); - (*(this->success)).resize(_size1943); - uint32_t _i1947; - for (_i1947 = 0; _i1947 < _size1943; ++_i1947) + uint32_t _size1949; + ::apache::thrift::protocol::TType _etype1952; + xfer += iprot->readListBegin(_etype1952, _size1949); + (*(this->success)).resize(_size1949); + uint32_t _i1953; + for (_i1953 = 0; _i1953 < _size1949; ++_i1953) { - xfer += (*(this->success))[_i1947].read(iprot); + xfer += (*(this->success))[_i1953].read(iprot); } xfer += iprot->readListEnd(); } @@ -7901,14 +7901,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->primaryKeys.clear(); - uint32_t _size1948; - ::apache::thrift::protocol::TType _etype1951; - xfer += iprot->readListBegin(_etype1951, _size1948); - this->primaryKeys.resize(_size1948); - uint32_t _i1952; - for (_i1952 = 0; _i1952 < _size1948; ++_i1952) + uint32_t _size1954; + ::apache::thrift::protocol::TType _etype1957; + xfer += iprot->readListBegin(_etype1957, _size1954); + this->primaryKeys.resize(_size1954); + uint32_t _i1958; + for (_i1958 = 0; _i1958 < _size1954; ++_i1958) { - xfer += this->primaryKeys[_i1952].read(iprot); + xfer += this->primaryKeys[_i1958].read(iprot); } xfer += iprot->readListEnd(); } @@ -7921,14 +7921,14 @@ uint32_t ThriftHiveMetastore_create_table_with_constraints_args::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->foreignKeys.clear(); - uint32_t _size1953; - ::apache::thrift::protocol::TType _etype1956; - xfer += iprot->readListBegin(_etype1956, _size1953); - this->foreignKeys.resize(_size1953); - uint32_t _i1957; - for (_i1957 = 0; _i1957 < _size1953; ++_i1957) + uint32_t _size1959; + ::apache::thrift::protocol::TType _etype1962; + xfer += iprot->readListBegin(_etype1962, 
[... remainder of the regenerated ThriftHiveMetastore C++ client: a long run of mechanical hunks in the read()/write() methods of the *_args/_pargs/_result/_presult classes for create_table_with_constraints, truncate_table, get_tables, get_tables_by_type, get_all_materialized_view_objects_for_rewriting, get_materialized_views_for_rewriting, get_table_meta, get_all_tables, get_table_objects_by_name, get_tables_ext, get_table_names_by_filter, add_partitions, add_partitions_pspec, append_partition, append_partition_with_environment_context, drop_partition, drop_partition_with_environment_context, get_partition, exchange_partition, exchange_partitions, get_partition_with_auth, get_partitions, and get_partitions_with_auth. Each hunk only renumbers the Thrift-compiler-generated local variables (_size*, _etype*, _ktype*, _vtype*, _i*, _iter*, _key*, _val*) by +6 (e.g. _size1959 -> _size1965 is not present; the actual shift seen here is _size1957 -> _size1963, _iter2225 -> _iter2231); the surrounding list/map serialization logic (readListBegin/readListEnd, readMapBegin/readMapEnd, writeListBegin/writeListEnd, writeMapBegin/writeMapEnd loops) is unchanged.]
-22481,14 +22481,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2226; - ::apache::thrift::protocol::TType _etype2229; - xfer += iprot->readListBegin(_etype2229, _size2226); - (*(this->success)).resize(_size2226); - uint32_t _i2230; - for (_i2230 = 0; _i2230 < _size2226; ++_i2230) + uint32_t _size2232; + ::apache::thrift::protocol::TType _etype2235; + xfer += iprot->readListBegin(_etype2235, _size2232); + (*(this->success)).resize(_size2232); + uint32_t _i2236; + for (_i2236 = 0; _i2236 < _size2232; ++_i2236) { - xfer += (*(this->success))[_i2230].read(iprot); + xfer += (*(this->success))[_i2236].read(iprot); } xfer += iprot->readListEnd(); } @@ -22666,14 +22666,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2231; - ::apache::thrift::protocol::TType _etype2234; - xfer += iprot->readListBegin(_etype2234, _size2231); - this->success.resize(_size2231); - uint32_t _i2235; - for (_i2235 = 0; _i2235 < _size2231; ++_i2235) + uint32_t _size2237; + ::apache::thrift::protocol::TType _etype2240; + xfer += iprot->readListBegin(_etype2240, _size2237); + this->success.resize(_size2237); + uint32_t _i2241; + for (_i2241 = 0; _i2241 < _size2237; ++_i2241) { - xfer += this->success[_i2235].read(iprot); + xfer += this->success[_i2241].read(iprot); } xfer += iprot->readListEnd(); } @@ -22720,10 +22720,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter2236; - for (_iter2236 = this->success.begin(); _iter2236 != this->success.end(); ++_iter2236) + std::vector ::const_iterator _iter2242; + for (_iter2242 = this->success.begin(); _iter2242 != this->success.end(); ++_iter2242) { - xfer += (*_iter2236).write(oprot); + xfer += (*_iter2242).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22772,14 +22772,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2237; - ::apache::thrift::protocol::TType _etype2240; - xfer += iprot->readListBegin(_etype2240, _size2237); - (*(this->success)).resize(_size2237); - uint32_t _i2241; - for (_i2241 = 0; _i2241 < _size2237; ++_i2241) + uint32_t _size2243; + ::apache::thrift::protocol::TType _etype2246; + xfer += iprot->readListBegin(_etype2246, _size2243); + (*(this->success)).resize(_size2243); + uint32_t _i2247; + for (_i2247 = 0; _i2247 < _size2243; ++_i2247) { - xfer += (*(this->success))[_i2241].read(iprot); + xfer += (*(this->success))[_i2247].read(iprot); } xfer += iprot->readListEnd(); } @@ -22957,14 +22957,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2242; - ::apache::thrift::protocol::TType _etype2245; - xfer += iprot->readListBegin(_etype2245, _size2242); - this->success.resize(_size2242); - uint32_t _i2246; - for (_i2246 = 0; _i2246 < _size2242; ++_i2246) + uint32_t _size2248; + ::apache::thrift::protocol::TType _etype2251; + xfer += iprot->readListBegin(_etype2251, _size2248); + 
this->success.resize(_size2248); + uint32_t _i2252; + for (_i2252 = 0; _i2252 < _size2248; ++_i2252) { - xfer += iprot->readString(this->success[_i2246]); + xfer += iprot->readString(this->success[_i2252]); } xfer += iprot->readListEnd(); } @@ -23011,10 +23011,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter2247; - for (_iter2247 = this->success.begin(); _iter2247 != this->success.end(); ++_iter2247) + std::vector ::const_iterator _iter2253; + for (_iter2253 = this->success.begin(); _iter2253 != this->success.end(); ++_iter2253) { - xfer += oprot->writeString((*_iter2247)); + xfer += oprot->writeString((*_iter2253)); } xfer += oprot->writeListEnd(); } @@ -23063,14 +23063,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2248; - ::apache::thrift::protocol::TType _etype2251; - xfer += iprot->readListBegin(_etype2251, _size2248); - (*(this->success)).resize(_size2248); - uint32_t _i2252; - for (_i2252 = 0; _i2252 < _size2248; ++_i2252) + uint32_t _size2254; + ::apache::thrift::protocol::TType _etype2257; + xfer += iprot->readListBegin(_etype2257, _size2254); + (*(this->success)).resize(_size2254); + uint32_t _i2258; + for (_i2258 = 0; _i2258 < _size2254; ++_i2258) { - xfer += iprot->readString((*(this->success))[_i2252]); + xfer += iprot->readString((*(this->success))[_i2258]); } xfer += iprot->readListEnd(); } @@ -23380,14 +23380,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size2253; - ::apache::thrift::protocol::TType _etype2256; - xfer += iprot->readListBegin(_etype2256, _size2253); - this->part_vals.resize(_size2253); - uint32_t _i2257; - for (_i2257 = 0; _i2257 < _size2253; ++_i2257) + uint32_t _size2259; + ::apache::thrift::protocol::TType _etype2262; + xfer += iprot->readListBegin(_etype2262, _size2259); + this->part_vals.resize(_size2259); + uint32_t _i2263; + for (_i2263 = 0; _i2263 < _size2259; ++_i2263) { - xfer += iprot->readString(this->part_vals[_i2257]); + xfer += iprot->readString(this->part_vals[_i2263]); } xfer += iprot->readListEnd(); } @@ -23432,10 +23432,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter2258; - for (_iter2258 = this->part_vals.begin(); _iter2258 != this->part_vals.end(); ++_iter2258) + std::vector ::const_iterator _iter2264; + for (_iter2264 = this->part_vals.begin(); _iter2264 != this->part_vals.end(); ++_iter2264) { - xfer += oprot->writeString((*_iter2258)); + xfer += oprot->writeString((*_iter2264)); } xfer += oprot->writeListEnd(); } @@ -23471,10 +23471,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator 
_iter2259; - for (_iter2259 = (*(this->part_vals)).begin(); _iter2259 != (*(this->part_vals)).end(); ++_iter2259) + std::vector ::const_iterator _iter2265; + for (_iter2265 = (*(this->part_vals)).begin(); _iter2265 != (*(this->part_vals)).end(); ++_iter2265) { - xfer += oprot->writeString((*_iter2259)); + xfer += oprot->writeString((*_iter2265)); } xfer += oprot->writeListEnd(); } @@ -23519,14 +23519,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2260; - ::apache::thrift::protocol::TType _etype2263; - xfer += iprot->readListBegin(_etype2263, _size2260); - this->success.resize(_size2260); - uint32_t _i2264; - for (_i2264 = 0; _i2264 < _size2260; ++_i2264) + uint32_t _size2266; + ::apache::thrift::protocol::TType _etype2269; + xfer += iprot->readListBegin(_etype2269, _size2266); + this->success.resize(_size2266); + uint32_t _i2270; + for (_i2270 = 0; _i2270 < _size2266; ++_i2270) { - xfer += this->success[_i2264].read(iprot); + xfer += this->success[_i2270].read(iprot); } xfer += iprot->readListEnd(); } @@ -23573,10 +23573,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter2265; - for (_iter2265 = this->success.begin(); _iter2265 != this->success.end(); ++_iter2265) + std::vector ::const_iterator _iter2271; + for (_iter2271 = this->success.begin(); _iter2271 != this->success.end(); ++_iter2271) { - xfer += (*_iter2265).write(oprot); + xfer += (*_iter2271).write(oprot); } xfer += oprot->writeListEnd(); } @@ -23625,14 +23625,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2266; - ::apache::thrift::protocol::TType _etype2269; - xfer += iprot->readListBegin(_etype2269, _size2266); - (*(this->success)).resize(_size2266); - uint32_t _i2270; - for (_i2270 = 0; _i2270 < _size2266; ++_i2270) + uint32_t _size2272; + ::apache::thrift::protocol::TType _etype2275; + xfer += iprot->readListBegin(_etype2275, _size2272); + (*(this->success)).resize(_size2272); + uint32_t _i2276; + for (_i2276 = 0; _i2276 < _size2272; ++_i2276) { - xfer += (*(this->success))[_i2270].read(iprot); + xfer += (*(this->success))[_i2276].read(iprot); } xfer += iprot->readListEnd(); } @@ -23715,14 +23715,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size2271; - ::apache::thrift::protocol::TType _etype2274; - xfer += iprot->readListBegin(_etype2274, _size2271); - this->part_vals.resize(_size2271); - uint32_t _i2275; - for (_i2275 = 0; _i2275 < _size2271; ++_i2275) + uint32_t _size2277; + ::apache::thrift::protocol::TType _etype2280; + xfer += iprot->readListBegin(_etype2280, _size2277); + this->part_vals.resize(_size2277); + uint32_t _i2281; + for (_i2281 = 0; _i2281 < _size2277; ++_i2281) { - xfer += iprot->readString(this->part_vals[_i2275]); + xfer += iprot->readString(this->part_vals[_i2281]); } xfer += iprot->readListEnd(); } @@ -23751,14 +23751,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { 
{ this->group_names.clear(); - uint32_t _size2276; - ::apache::thrift::protocol::TType _etype2279; - xfer += iprot->readListBegin(_etype2279, _size2276); - this->group_names.resize(_size2276); - uint32_t _i2280; - for (_i2280 = 0; _i2280 < _size2276; ++_i2280) + uint32_t _size2282; + ::apache::thrift::protocol::TType _etype2285; + xfer += iprot->readListBegin(_etype2285, _size2282); + this->group_names.resize(_size2282); + uint32_t _i2286; + for (_i2286 = 0; _i2286 < _size2282; ++_i2286) { - xfer += iprot->readString(this->group_names[_i2280]); + xfer += iprot->readString(this->group_names[_i2286]); } xfer += iprot->readListEnd(); } @@ -23795,10 +23795,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter2281; - for (_iter2281 = this->part_vals.begin(); _iter2281 != this->part_vals.end(); ++_iter2281) + std::vector ::const_iterator _iter2287; + for (_iter2287 = this->part_vals.begin(); _iter2287 != this->part_vals.end(); ++_iter2287) { - xfer += oprot->writeString((*_iter2281)); + xfer += oprot->writeString((*_iter2287)); } xfer += oprot->writeListEnd(); } @@ -23815,10 +23815,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter2282; - for (_iter2282 = this->group_names.begin(); _iter2282 != this->group_names.end(); ++_iter2282) + std::vector ::const_iterator _iter2288; + for (_iter2288 = this->group_names.begin(); _iter2288 != this->group_names.end(); ++_iter2288) { - xfer += oprot->writeString((*_iter2282)); + xfer += oprot->writeString((*_iter2288)); } xfer += oprot->writeListEnd(); } @@ -23850,10 +23850,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter2283; - for (_iter2283 = (*(this->part_vals)).begin(); _iter2283 != (*(this->part_vals)).end(); ++_iter2283) + std::vector ::const_iterator _iter2289; + for (_iter2289 = (*(this->part_vals)).begin(); _iter2289 != (*(this->part_vals)).end(); ++_iter2289) { - xfer += oprot->writeString((*_iter2283)); + xfer += oprot->writeString((*_iter2289)); } xfer += oprot->writeListEnd(); } @@ -23870,10 +23870,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter2284; - for (_iter2284 = (*(this->group_names)).begin(); _iter2284 != (*(this->group_names)).end(); ++_iter2284) + std::vector ::const_iterator _iter2290; + for (_iter2290 = (*(this->group_names)).begin(); _iter2290 != (*(this->group_names)).end(); ++_iter2290) { - xfer += oprot->writeString((*_iter2284)); + xfer += oprot->writeString((*_iter2290)); } xfer += oprot->writeListEnd(); } @@ -23914,14 +23914,14 @@ uint32_t 
ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2285; - ::apache::thrift::protocol::TType _etype2288; - xfer += iprot->readListBegin(_etype2288, _size2285); - this->success.resize(_size2285); - uint32_t _i2289; - for (_i2289 = 0; _i2289 < _size2285; ++_i2289) + uint32_t _size2291; + ::apache::thrift::protocol::TType _etype2294; + xfer += iprot->readListBegin(_etype2294, _size2291); + this->success.resize(_size2291); + uint32_t _i2295; + for (_i2295 = 0; _i2295 < _size2291; ++_i2295) { - xfer += this->success[_i2289].read(iprot); + xfer += this->success[_i2295].read(iprot); } xfer += iprot->readListEnd(); } @@ -23968,10 +23968,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter2290; - for (_iter2290 = this->success.begin(); _iter2290 != this->success.end(); ++_iter2290) + std::vector ::const_iterator _iter2296; + for (_iter2296 = this->success.begin(); _iter2296 != this->success.end(); ++_iter2296) { - xfer += (*_iter2290).write(oprot); + xfer += (*_iter2296).write(oprot); } xfer += oprot->writeListEnd(); } @@ -24020,14 +24020,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2291; - ::apache::thrift::protocol::TType _etype2294; - xfer += iprot->readListBegin(_etype2294, _size2291); - (*(this->success)).resize(_size2291); - uint32_t _i2295; - for (_i2295 = 0; _i2295 < _size2291; ++_i2295) + uint32_t _size2297; + ::apache::thrift::protocol::TType _etype2300; + xfer += iprot->readListBegin(_etype2300, _size2297); + (*(this->success)).resize(_size2297); + uint32_t _i2301; + for (_i2301 = 0; _i2301 < _size2297; ++_i2301) { - xfer += (*(this->success))[_i2295].read(iprot); + xfer += (*(this->success))[_i2301].read(iprot); } xfer += iprot->readListEnd(); } @@ -24337,14 +24337,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size2296; - ::apache::thrift::protocol::TType _etype2299; - xfer += iprot->readListBegin(_etype2299, _size2296); - this->part_vals.resize(_size2296); - uint32_t _i2300; - for (_i2300 = 0; _i2300 < _size2296; ++_i2300) + uint32_t _size2302; + ::apache::thrift::protocol::TType _etype2305; + xfer += iprot->readListBegin(_etype2305, _size2302); + this->part_vals.resize(_size2302); + uint32_t _i2306; + for (_i2306 = 0; _i2306 < _size2302; ++_i2306) { - xfer += iprot->readString(this->part_vals[_i2300]); + xfer += iprot->readString(this->part_vals[_i2306]); } xfer += iprot->readListEnd(); } @@ -24389,10 +24389,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter2301; - for (_iter2301 = this->part_vals.begin(); _iter2301 != this->part_vals.end(); ++_iter2301) + std::vector ::const_iterator _iter2307; + for (_iter2307 = this->part_vals.begin(); _iter2307 != this->part_vals.end(); ++_iter2307) { - xfer 
+= oprot->writeString((*_iter2301)); + xfer += oprot->writeString((*_iter2307)); } xfer += oprot->writeListEnd(); } @@ -24428,10 +24428,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter2302; - for (_iter2302 = (*(this->part_vals)).begin(); _iter2302 != (*(this->part_vals)).end(); ++_iter2302) + std::vector ::const_iterator _iter2308; + for (_iter2308 = (*(this->part_vals)).begin(); _iter2308 != (*(this->part_vals)).end(); ++_iter2308) { - xfer += oprot->writeString((*_iter2302)); + xfer += oprot->writeString((*_iter2308)); } xfer += oprot->writeListEnd(); } @@ -24476,14 +24476,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2303; - ::apache::thrift::protocol::TType _etype2306; - xfer += iprot->readListBegin(_etype2306, _size2303); - this->success.resize(_size2303); - uint32_t _i2307; - for (_i2307 = 0; _i2307 < _size2303; ++_i2307) + uint32_t _size2309; + ::apache::thrift::protocol::TType _etype2312; + xfer += iprot->readListBegin(_etype2312, _size2309); + this->success.resize(_size2309); + uint32_t _i2313; + for (_i2313 = 0; _i2313 < _size2309; ++_i2313) { - xfer += iprot->readString(this->success[_i2307]); + xfer += iprot->readString(this->success[_i2313]); } xfer += iprot->readListEnd(); } @@ -24530,10 +24530,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter2308; - for (_iter2308 = this->success.begin(); _iter2308 != this->success.end(); ++_iter2308) + std::vector ::const_iterator _iter2314; + for (_iter2314 = this->success.begin(); _iter2314 != this->success.end(); ++_iter2314) { - xfer += oprot->writeString((*_iter2308)); + xfer += oprot->writeString((*_iter2314)); } xfer += oprot->writeListEnd(); } @@ -24582,14 +24582,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2309; - ::apache::thrift::protocol::TType _etype2312; - xfer += iprot->readListBegin(_etype2312, _size2309); - (*(this->success)).resize(_size2309); - uint32_t _i2313; - for (_i2313 = 0; _i2313 < _size2309; ++_i2313) + uint32_t _size2315; + ::apache::thrift::protocol::TType _etype2318; + xfer += iprot->readListBegin(_etype2318, _size2315); + (*(this->success)).resize(_size2315); + uint32_t _i2319; + for (_i2319 = 0; _i2319 < _size2315; ++_i2319) { - xfer += iprot->readString((*(this->success))[_i2313]); + xfer += iprot->readString((*(this->success))[_i2319]); } xfer += iprot->readListEnd(); } @@ -24962,14 +24962,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_req_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2314; - ::apache::thrift::protocol::TType _etype2317; - xfer += iprot->readListBegin(_etype2317, _size2314); - this->success.resize(_size2314); - uint32_t _i2318; - for (_i2318 = 0; _i2318 < _size2314; ++_i2318) + uint32_t _size2320; + 
::apache::thrift::protocol::TType _etype2323; + xfer += iprot->readListBegin(_etype2323, _size2320); + this->success.resize(_size2320); + uint32_t _i2324; + for (_i2324 = 0; _i2324 < _size2320; ++_i2324) { - xfer += iprot->readString(this->success[_i2318]); + xfer += iprot->readString(this->success[_i2324]); } xfer += iprot->readListEnd(); } @@ -25016,10 +25016,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_req_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter2319; - for (_iter2319 = this->success.begin(); _iter2319 != this->success.end(); ++_iter2319) + std::vector ::const_iterator _iter2325; + for (_iter2325 = this->success.begin(); _iter2325 != this->success.end(); ++_iter2325) { - xfer += oprot->writeString((*_iter2319)); + xfer += oprot->writeString((*_iter2325)); } xfer += oprot->writeListEnd(); } @@ -25068,14 +25068,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_req_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2320; - ::apache::thrift::protocol::TType _etype2323; - xfer += iprot->readListBegin(_etype2323, _size2320); - (*(this->success)).resize(_size2320); - uint32_t _i2324; - for (_i2324 = 0; _i2324 < _size2320; ++_i2324) + uint32_t _size2326; + ::apache::thrift::protocol::TType _etype2329; + xfer += iprot->readListBegin(_etype2329, _size2326); + (*(this->success)).resize(_size2326); + uint32_t _i2330; + for (_i2330 = 0; _i2330 < _size2326; ++_i2330) { - xfer += iprot->readString((*(this->success))[_i2324]); + xfer += iprot->readString((*(this->success))[_i2330]); } xfer += iprot->readListEnd(); } @@ -25269,14 +25269,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2325; - ::apache::thrift::protocol::TType _etype2328; - xfer += iprot->readListBegin(_etype2328, _size2325); - this->success.resize(_size2325); - uint32_t _i2329; - for (_i2329 = 0; _i2329 < _size2325; ++_i2329) + uint32_t _size2331; + ::apache::thrift::protocol::TType _etype2334; + xfer += iprot->readListBegin(_etype2334, _size2331); + this->success.resize(_size2331); + uint32_t _i2335; + for (_i2335 = 0; _i2335 < _size2331; ++_i2335) { - xfer += this->success[_i2329].read(iprot); + xfer += this->success[_i2335].read(iprot); } xfer += iprot->readListEnd(); } @@ -25323,10 +25323,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter2330; - for (_iter2330 = this->success.begin(); _iter2330 != this->success.end(); ++_iter2330) + std::vector ::const_iterator _iter2336; + for (_iter2336 = this->success.begin(); _iter2336 != this->success.end(); ++_iter2336) { - xfer += (*_iter2330).write(oprot); + xfer += (*_iter2336).write(oprot); } xfer += oprot->writeListEnd(); } @@ -25375,14 +25375,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2331; - ::apache::thrift::protocol::TType _etype2334; - xfer += 
iprot->readListBegin(_etype2334, _size2331); - (*(this->success)).resize(_size2331); - uint32_t _i2335; - for (_i2335 = 0; _i2335 < _size2331; ++_i2335) + uint32_t _size2337; + ::apache::thrift::protocol::TType _etype2340; + xfer += iprot->readListBegin(_etype2340, _size2337); + (*(this->success)).resize(_size2337); + uint32_t _i2341; + for (_i2341 = 0; _i2341 < _size2337; ++_i2341) { - xfer += (*(this->success))[_i2335].read(iprot); + xfer += (*(this->success))[_i2341].read(iprot); } xfer += iprot->readListEnd(); } @@ -25528,14 +25528,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_req_result::read(::apache: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2336; - ::apache::thrift::protocol::TType _etype2339; - xfer += iprot->readListBegin(_etype2339, _size2336); - this->success.resize(_size2336); - uint32_t _i2340; - for (_i2340 = 0; _i2340 < _size2336; ++_i2340) + uint32_t _size2342; + ::apache::thrift::protocol::TType _etype2345; + xfer += iprot->readListBegin(_etype2345, _size2342); + this->success.resize(_size2342); + uint32_t _i2346; + for (_i2346 = 0; _i2346 < _size2342; ++_i2346) { - xfer += this->success[_i2340].read(iprot); + xfer += this->success[_i2346].read(iprot); } xfer += iprot->readListEnd(); } @@ -25582,10 +25582,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_req_result::write(::apache xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter2341; - for (_iter2341 = this->success.begin(); _iter2341 != this->success.end(); ++_iter2341) + std::vector ::const_iterator _iter2347; + for (_iter2347 = this->success.begin(); _iter2347 != this->success.end(); ++_iter2347) { - xfer += (*_iter2341).write(oprot); + xfer += (*_iter2347).write(oprot); } xfer += oprot->writeListEnd(); } @@ -25634,14 +25634,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_req_presult::read(::apache if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2342; - ::apache::thrift::protocol::TType _etype2345; - xfer += iprot->readListBegin(_etype2345, _size2342); - (*(this->success)).resize(_size2342); - uint32_t _i2346; - for (_i2346 = 0; _i2346 < _size2342; ++_i2346) + uint32_t _size2348; + ::apache::thrift::protocol::TType _etype2351; + xfer += iprot->readListBegin(_etype2351, _size2348); + (*(this->success)).resize(_size2348); + uint32_t _i2352; + for (_i2352 = 0; _i2352 < _size2348; ++_i2352) { - xfer += (*(this->success))[_i2346].read(iprot); + xfer += (*(this->success))[_i2352].read(iprot); } xfer += iprot->readListEnd(); } @@ -25835,14 +25835,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2347; - ::apache::thrift::protocol::TType _etype2350; - xfer += iprot->readListBegin(_etype2350, _size2347); - this->success.resize(_size2347); - uint32_t _i2351; - for (_i2351 = 0; _i2351 < _size2347; ++_i2351) + uint32_t _size2353; + ::apache::thrift::protocol::TType _etype2356; + xfer += iprot->readListBegin(_etype2356, _size2353); + this->success.resize(_size2353); + uint32_t _i2357; + for (_i2357 = 0; _i2357 < _size2353; ++_i2357) { - xfer += this->success[_i2351].read(iprot); + xfer += this->success[_i2357].read(iprot); } xfer += iprot->readListEnd(); } @@ -25889,10 +25889,10 @@ uint32_t 
ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter2352; - for (_iter2352 = this->success.begin(); _iter2352 != this->success.end(); ++_iter2352) + std::vector ::const_iterator _iter2358; + for (_iter2358 = this->success.begin(); _iter2358 != this->success.end(); ++_iter2358) { - xfer += (*_iter2352).write(oprot); + xfer += (*_iter2358).write(oprot); } xfer += oprot->writeListEnd(); } @@ -25941,14 +25941,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2353; - ::apache::thrift::protocol::TType _etype2356; - xfer += iprot->readListBegin(_etype2356, _size2353); - (*(this->success)).resize(_size2353); - uint32_t _i2357; - for (_i2357 = 0; _i2357 < _size2353; ++_i2357) + uint32_t _size2359; + ::apache::thrift::protocol::TType _etype2362; + xfer += iprot->readListBegin(_etype2362, _size2359); + (*(this->success)).resize(_size2359); + uint32_t _i2363; + for (_i2363 = 0; _i2363 < _size2359; ++_i2363) { - xfer += (*(this->success))[_i2357].read(iprot); + xfer += (*(this->success))[_i2363].read(iprot); } xfer += iprot->readListEnd(); } @@ -26744,14 +26744,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size2358; - ::apache::thrift::protocol::TType _etype2361; - xfer += iprot->readListBegin(_etype2361, _size2358); - this->names.resize(_size2358); - uint32_t _i2362; - for (_i2362 = 0; _i2362 < _size2358; ++_i2362) + uint32_t _size2364; + ::apache::thrift::protocol::TType _etype2367; + xfer += iprot->readListBegin(_etype2367, _size2364); + this->names.resize(_size2364); + uint32_t _i2368; + for (_i2368 = 0; _i2368 < _size2364; ++_i2368) { - xfer += iprot->readString(this->names[_i2362]); + xfer += iprot->readString(this->names[_i2368]); } xfer += iprot->readListEnd(); } @@ -26788,10 +26788,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter2363; - for (_iter2363 = this->names.begin(); _iter2363 != this->names.end(); ++_iter2363) + std::vector ::const_iterator _iter2369; + for (_iter2369 = this->names.begin(); _iter2369 != this->names.end(); ++_iter2369) { - xfer += oprot->writeString((*_iter2363)); + xfer += oprot->writeString((*_iter2369)); } xfer += oprot->writeListEnd(); } @@ -26823,10 +26823,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter2364; - for (_iter2364 = (*(this->names)).begin(); _iter2364 != (*(this->names)).end(); ++_iter2364) + std::vector ::const_iterator _iter2370; + for (_iter2370 = (*(this->names)).begin(); _iter2370 != (*(this->names)).end(); ++_iter2370) { - xfer += oprot->writeString((*_iter2364)); + xfer += oprot->writeString((*_iter2370)); } xfer += oprot->writeListEnd(); 
} @@ -26867,14 +26867,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2365; - ::apache::thrift::protocol::TType _etype2368; - xfer += iprot->readListBegin(_etype2368, _size2365); - this->success.resize(_size2365); - uint32_t _i2369; - for (_i2369 = 0; _i2369 < _size2365; ++_i2369) + uint32_t _size2371; + ::apache::thrift::protocol::TType _etype2374; + xfer += iprot->readListBegin(_etype2374, _size2371); + this->success.resize(_size2371); + uint32_t _i2375; + for (_i2375 = 0; _i2375 < _size2371; ++_i2375) { - xfer += this->success[_i2369].read(iprot); + xfer += this->success[_i2375].read(iprot); } xfer += iprot->readListEnd(); } @@ -26929,10 +26929,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter2370; - for (_iter2370 = this->success.begin(); _iter2370 != this->success.end(); ++_iter2370) + std::vector ::const_iterator _iter2376; + for (_iter2376 = this->success.begin(); _iter2376 != this->success.end(); ++_iter2376) { - xfer += (*_iter2370).write(oprot); + xfer += (*_iter2376).write(oprot); } xfer += oprot->writeListEnd(); } @@ -26985,14 +26985,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2371; - ::apache::thrift::protocol::TType _etype2374; - xfer += iprot->readListBegin(_etype2374, _size2371); - (*(this->success)).resize(_size2371); - uint32_t _i2375; - for (_i2375 = 0; _i2375 < _size2371; ++_i2375) + uint32_t _size2377; + ::apache::thrift::protocol::TType _etype2380; + xfer += iprot->readListBegin(_etype2380, _size2377); + (*(this->success)).resize(_size2377); + uint32_t _i2381; + for (_i2381 = 0; _i2381 < _size2377; ++_i2381) { - xfer += (*(this->success))[_i2375].read(iprot); + xfer += (*(this->success))[_i2381].read(iprot); } xfer += iprot->readListEnd(); } @@ -28023,14 +28023,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size2376; - ::apache::thrift::protocol::TType _etype2379; - xfer += iprot->readListBegin(_etype2379, _size2376); - this->new_parts.resize(_size2376); - uint32_t _i2380; - for (_i2380 = 0; _i2380 < _size2376; ++_i2380) + uint32_t _size2382; + ::apache::thrift::protocol::TType _etype2385; + xfer += iprot->readListBegin(_etype2385, _size2382); + this->new_parts.resize(_size2382); + uint32_t _i2386; + for (_i2386 = 0; _i2386 < _size2382; ++_i2386) { - xfer += this->new_parts[_i2380].read(iprot); + xfer += this->new_parts[_i2386].read(iprot); } xfer += iprot->readListEnd(); } @@ -28067,10 +28067,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter2381; - for (_iter2381 = this->new_parts.begin(); _iter2381 != this->new_parts.end(); ++_iter2381) + std::vector ::const_iterator _iter2387; + for (_iter2387 = this->new_parts.begin(); _iter2387 != 
this->new_parts.end(); ++_iter2387) { - xfer += (*_iter2381).write(oprot); + xfer += (*_iter2387).write(oprot); } xfer += oprot->writeListEnd(); } @@ -28102,10 +28102,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter2382; - for (_iter2382 = (*(this->new_parts)).begin(); _iter2382 != (*(this->new_parts)).end(); ++_iter2382) + std::vector ::const_iterator _iter2388; + for (_iter2388 = (*(this->new_parts)).begin(); _iter2388 != (*(this->new_parts)).end(); ++_iter2388) { - xfer += (*_iter2382).write(oprot); + xfer += (*_iter2388).write(oprot); } xfer += oprot->writeListEnd(); } @@ -28290,14 +28290,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size2383; - ::apache::thrift::protocol::TType _etype2386; - xfer += iprot->readListBegin(_etype2386, _size2383); - this->new_parts.resize(_size2383); - uint32_t _i2387; - for (_i2387 = 0; _i2387 < _size2383; ++_i2387) + uint32_t _size2389; + ::apache::thrift::protocol::TType _etype2392; + xfer += iprot->readListBegin(_etype2392, _size2389); + this->new_parts.resize(_size2389); + uint32_t _i2393; + for (_i2393 = 0; _i2393 < _size2389; ++_i2393) { - xfer += this->new_parts[_i2387].read(iprot); + xfer += this->new_parts[_i2393].read(iprot); } xfer += iprot->readListEnd(); } @@ -28342,10 +28342,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::wri xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter2388; - for (_iter2388 = this->new_parts.begin(); _iter2388 != this->new_parts.end(); ++_iter2388) + std::vector ::const_iterator _iter2394; + for (_iter2394 = this->new_parts.begin(); _iter2394 != this->new_parts.end(); ++_iter2394) { - xfer += (*_iter2388).write(oprot); + xfer += (*_iter2394).write(oprot); } xfer += oprot->writeListEnd(); } @@ -28381,10 +28381,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter2389; - for (_iter2389 = (*(this->new_parts)).begin(); _iter2389 != (*(this->new_parts)).end(); ++_iter2389) + std::vector ::const_iterator _iter2395; + for (_iter2395 = (*(this->new_parts)).begin(); _iter2395 != (*(this->new_parts)).end(); ++_iter2395) { - xfer += (*_iter2389).write(oprot); + xfer += (*_iter2395).write(oprot); } xfer += oprot->writeListEnd(); } @@ -29055,14 +29055,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size2390; - ::apache::thrift::protocol::TType _etype2393; - xfer += iprot->readListBegin(_etype2393, _size2390); - this->part_vals.resize(_size2390); - uint32_t _i2394; - for (_i2394 = 0; _i2394 < _size2390; ++_i2394) + uint32_t _size2396; + ::apache::thrift::protocol::TType _etype2399; + xfer += iprot->readListBegin(_etype2399, _size2396); + 
this->part_vals.resize(_size2396); + uint32_t _i2400; + for (_i2400 = 0; _i2400 < _size2396; ++_i2400) { - xfer += iprot->readString(this->part_vals[_i2394]); + xfer += iprot->readString(this->part_vals[_i2400]); } xfer += iprot->readListEnd(); } @@ -29107,10 +29107,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter2395; - for (_iter2395 = this->part_vals.begin(); _iter2395 != this->part_vals.end(); ++_iter2395) + std::vector ::const_iterator _iter2401; + for (_iter2401 = this->part_vals.begin(); _iter2401 != this->part_vals.end(); ++_iter2401) { - xfer += oprot->writeString((*_iter2395)); + xfer += oprot->writeString((*_iter2401)); } xfer += oprot->writeListEnd(); } @@ -29146,10 +29146,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter2396; - for (_iter2396 = (*(this->part_vals)).begin(); _iter2396 != (*(this->part_vals)).end(); ++_iter2396) + std::vector ::const_iterator _iter2402; + for (_iter2402 = (*(this->part_vals)).begin(); _iter2402 != (*(this->part_vals)).end(); ++_iter2402) { - xfer += oprot->writeString((*_iter2396)); + xfer += oprot->writeString((*_iter2402)); } xfer += oprot->writeListEnd(); } @@ -29549,14 +29549,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size2397; - ::apache::thrift::protocol::TType _etype2400; - xfer += iprot->readListBegin(_etype2400, _size2397); - this->part_vals.resize(_size2397); - uint32_t _i2401; - for (_i2401 = 0; _i2401 < _size2397; ++_i2401) + uint32_t _size2403; + ::apache::thrift::protocol::TType _etype2406; + xfer += iprot->readListBegin(_etype2406, _size2403); + this->part_vals.resize(_size2403); + uint32_t _i2407; + for (_i2407 = 0; _i2407 < _size2403; ++_i2407) { - xfer += iprot->readString(this->part_vals[_i2401]); + xfer += iprot->readString(this->part_vals[_i2407]); } xfer += iprot->readListEnd(); } @@ -29593,10 +29593,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter2402; - for (_iter2402 = this->part_vals.begin(); _iter2402 != this->part_vals.end(); ++_iter2402) + std::vector ::const_iterator _iter2408; + for (_iter2408 = this->part_vals.begin(); _iter2408 != this->part_vals.end(); ++_iter2408) { - xfer += oprot->writeString((*_iter2402)); + xfer += oprot->writeString((*_iter2408)); } xfer += oprot->writeListEnd(); } @@ -29624,10 +29624,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter2403; - for (_iter2403 = (*(this->part_vals)).begin(); _iter2403 != 
(*(this->part_vals)).end(); ++_iter2403) + std::vector ::const_iterator _iter2409; + for (_iter2409 = (*(this->part_vals)).begin(); _iter2409 != (*(this->part_vals)).end(); ++_iter2409) { - xfer += oprot->writeString((*_iter2403)); + xfer += oprot->writeString((*_iter2409)); } xfer += oprot->writeListEnd(); } @@ -30102,14 +30102,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2404; - ::apache::thrift::protocol::TType _etype2407; - xfer += iprot->readListBegin(_etype2407, _size2404); - this->success.resize(_size2404); - uint32_t _i2408; - for (_i2408 = 0; _i2408 < _size2404; ++_i2408) + uint32_t _size2410; + ::apache::thrift::protocol::TType _etype2413; + xfer += iprot->readListBegin(_etype2413, _size2410); + this->success.resize(_size2410); + uint32_t _i2414; + for (_i2414 = 0; _i2414 < _size2410; ++_i2414) { - xfer += iprot->readString(this->success[_i2408]); + xfer += iprot->readString(this->success[_i2414]); } xfer += iprot->readListEnd(); } @@ -30148,10 +30148,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter2409; - for (_iter2409 = this->success.begin(); _iter2409 != this->success.end(); ++_iter2409) + std::vector ::const_iterator _iter2415; + for (_iter2415 = this->success.begin(); _iter2415 != this->success.end(); ++_iter2415) { - xfer += oprot->writeString((*_iter2409)); + xfer += oprot->writeString((*_iter2415)); } xfer += oprot->writeListEnd(); } @@ -30196,14 +30196,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2410; - ::apache::thrift::protocol::TType _etype2413; - xfer += iprot->readListBegin(_etype2413, _size2410); - (*(this->success)).resize(_size2410); - uint32_t _i2414; - for (_i2414 = 0; _i2414 < _size2410; ++_i2414) + uint32_t _size2416; + ::apache::thrift::protocol::TType _etype2419; + xfer += iprot->readListBegin(_etype2419, _size2416); + (*(this->success)).resize(_size2416); + uint32_t _i2420; + for (_i2420 = 0; _i2420 < _size2416; ++_i2420) { - xfer += iprot->readString((*(this->success))[_i2414]); + xfer += iprot->readString((*(this->success))[_i2420]); } xfer += iprot->readListEnd(); } @@ -30341,17 +30341,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size2415; - ::apache::thrift::protocol::TType _ktype2416; - ::apache::thrift::protocol::TType _vtype2417; - xfer += iprot->readMapBegin(_ktype2416, _vtype2417, _size2415); - uint32_t _i2419; - for (_i2419 = 0; _i2419 < _size2415; ++_i2419) + uint32_t _size2421; + ::apache::thrift::protocol::TType _ktype2422; + ::apache::thrift::protocol::TType _vtype2423; + xfer += iprot->readMapBegin(_ktype2422, _vtype2423, _size2421); + uint32_t _i2425; + for (_i2425 = 0; _i2425 < _size2421; ++_i2425) { - std::string _key2420; - xfer += iprot->readString(_key2420); - std::string& _val2421 = this->success[_key2420]; - xfer += iprot->readString(_val2421); + std::string _key2426; + xfer += iprot->readString(_key2426); + std::string& _val2427 = this->success[_key2426]; + xfer += 
iprot->readString(_val2427); } xfer += iprot->readMapEnd(); } @@ -30390,11 +30390,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter2422; - for (_iter2422 = this->success.begin(); _iter2422 != this->success.end(); ++_iter2422) + std::map ::const_iterator _iter2428; + for (_iter2428 = this->success.begin(); _iter2428 != this->success.end(); ++_iter2428) { - xfer += oprot->writeString(_iter2422->first); - xfer += oprot->writeString(_iter2422->second); + xfer += oprot->writeString(_iter2428->first); + xfer += oprot->writeString(_iter2428->second); } xfer += oprot->writeMapEnd(); } @@ -30439,17 +30439,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size2423; - ::apache::thrift::protocol::TType _ktype2424; - ::apache::thrift::protocol::TType _vtype2425; - xfer += iprot->readMapBegin(_ktype2424, _vtype2425, _size2423); - uint32_t _i2427; - for (_i2427 = 0; _i2427 < _size2423; ++_i2427) + uint32_t _size2429; + ::apache::thrift::protocol::TType _ktype2430; + ::apache::thrift::protocol::TType _vtype2431; + xfer += iprot->readMapBegin(_ktype2430, _vtype2431, _size2429); + uint32_t _i2433; + for (_i2433 = 0; _i2433 < _size2429; ++_i2433) { - std::string _key2428; - xfer += iprot->readString(_key2428); - std::string& _val2429 = (*(this->success))[_key2428]; - xfer += iprot->readString(_val2429); + std::string _key2434; + xfer += iprot->readString(_key2434); + std::string& _val2435 = (*(this->success))[_key2434]; + xfer += iprot->readString(_val2435); } xfer += iprot->readMapEnd(); } @@ -30524,17 +30524,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size2430; - ::apache::thrift::protocol::TType _ktype2431; - ::apache::thrift::protocol::TType _vtype2432; - xfer += iprot->readMapBegin(_ktype2431, _vtype2432, _size2430); - uint32_t _i2434; - for (_i2434 = 0; _i2434 < _size2430; ++_i2434) + uint32_t _size2436; + ::apache::thrift::protocol::TType _ktype2437; + ::apache::thrift::protocol::TType _vtype2438; + xfer += iprot->readMapBegin(_ktype2437, _vtype2438, _size2436); + uint32_t _i2440; + for (_i2440 = 0; _i2440 < _size2436; ++_i2440) { - std::string _key2435; - xfer += iprot->readString(_key2435); - std::string& _val2436 = this->part_vals[_key2435]; - xfer += iprot->readString(_val2436); + std::string _key2441; + xfer += iprot->readString(_key2441); + std::string& _val2442 = this->part_vals[_key2441]; + xfer += iprot->readString(_val2442); } xfer += iprot->readMapEnd(); } @@ -30545,9 +30545,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast2437; - xfer += iprot->readI32(ecast2437); - this->eventType = static_cast(ecast2437); + int32_t ecast2443; + xfer += iprot->readI32(ecast2443); + this->eventType = static_cast(ecast2443); this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -30581,11 +30581,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", 
::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter2438; - for (_iter2438 = this->part_vals.begin(); _iter2438 != this->part_vals.end(); ++_iter2438) + std::map ::const_iterator _iter2444; + for (_iter2444 = this->part_vals.begin(); _iter2444 != this->part_vals.end(); ++_iter2444) { - xfer += oprot->writeString(_iter2438->first); - xfer += oprot->writeString(_iter2438->second); + xfer += oprot->writeString(_iter2444->first); + xfer += oprot->writeString(_iter2444->second); } xfer += oprot->writeMapEnd(); } @@ -30621,11 +30621,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter2439; - for (_iter2439 = (*(this->part_vals)).begin(); _iter2439 != (*(this->part_vals)).end(); ++_iter2439) + std::map ::const_iterator _iter2445; + for (_iter2445 = (*(this->part_vals)).begin(); _iter2445 != (*(this->part_vals)).end(); ++_iter2445) { - xfer += oprot->writeString(_iter2439->first); - xfer += oprot->writeString(_iter2439->second); + xfer += oprot->writeString(_iter2445->first); + xfer += oprot->writeString(_iter2445->second); } xfer += oprot->writeMapEnd(); } @@ -30894,17 +30894,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size2440; - ::apache::thrift::protocol::TType _ktype2441; - ::apache::thrift::protocol::TType _vtype2442; - xfer += iprot->readMapBegin(_ktype2441, _vtype2442, _size2440); - uint32_t _i2444; - for (_i2444 = 0; _i2444 < _size2440; ++_i2444) + uint32_t _size2446; + ::apache::thrift::protocol::TType _ktype2447; + ::apache::thrift::protocol::TType _vtype2448; + xfer += iprot->readMapBegin(_ktype2447, _vtype2448, _size2446); + uint32_t _i2450; + for (_i2450 = 0; _i2450 < _size2446; ++_i2450) { - std::string _key2445; - xfer += iprot->readString(_key2445); - std::string& _val2446 = this->part_vals[_key2445]; - xfer += iprot->readString(_val2446); + std::string _key2451; + xfer += iprot->readString(_key2451); + std::string& _val2452 = this->part_vals[_key2451]; + xfer += iprot->readString(_val2452); } xfer += iprot->readMapEnd(); } @@ -30915,9 +30915,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast2447; - xfer += iprot->readI32(ecast2447); - this->eventType = static_cast(ecast2447); + int32_t ecast2453; + xfer += iprot->readI32(ecast2453); + this->eventType = static_cast(ecast2453); this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -30951,11 +30951,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter2448; - for (_iter2448 = this->part_vals.begin(); _iter2448 != this->part_vals.end(); ++_iter2448) + std::map ::const_iterator _iter2454; + for (_iter2454 = 
this->part_vals.begin(); _iter2454 != this->part_vals.end(); ++_iter2454) { - xfer += oprot->writeString(_iter2448->first); - xfer += oprot->writeString(_iter2448->second); + xfer += oprot->writeString(_iter2454->first); + xfer += oprot->writeString(_iter2454->second); } xfer += oprot->writeMapEnd(); } @@ -30991,11 +30991,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter2449; - for (_iter2449 = (*(this->part_vals)).begin(); _iter2449 != (*(this->part_vals)).end(); ++_iter2449) + std::map ::const_iterator _iter2455; + for (_iter2455 = (*(this->part_vals)).begin(); _iter2455 != (*(this->part_vals)).end(); ++_iter2455) { - xfer += oprot->writeString(_iter2449->first); - xfer += oprot->writeString(_iter2449->second); + xfer += oprot->writeString(_iter2455->first); + xfer += oprot->writeString(_iter2455->second); } xfer += oprot->writeMapEnd(); } @@ -37124,14 +37124,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2450; - ::apache::thrift::protocol::TType _etype2453; - xfer += iprot->readListBegin(_etype2453, _size2450); - this->success.resize(_size2450); - uint32_t _i2454; - for (_i2454 = 0; _i2454 < _size2450; ++_i2454) + uint32_t _size2456; + ::apache::thrift::protocol::TType _etype2459; + xfer += iprot->readListBegin(_etype2459, _size2456); + this->success.resize(_size2456); + uint32_t _i2460; + for (_i2460 = 0; _i2460 < _size2456; ++_i2460) { - xfer += iprot->readString(this->success[_i2454]); + xfer += iprot->readString(this->success[_i2460]); } xfer += iprot->readListEnd(); } @@ -37170,10 +37170,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter2455; - for (_iter2455 = this->success.begin(); _iter2455 != this->success.end(); ++_iter2455) + std::vector ::const_iterator _iter2461; + for (_iter2461 = this->success.begin(); _iter2461 != this->success.end(); ++_iter2461) { - xfer += oprot->writeString((*_iter2455)); + xfer += oprot->writeString((*_iter2461)); } xfer += oprot->writeListEnd(); } @@ -37218,14 +37218,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2456; - ::apache::thrift::protocol::TType _etype2459; - xfer += iprot->readListBegin(_etype2459, _size2456); - (*(this->success)).resize(_size2456); - uint32_t _i2460; - for (_i2460 = 0; _i2460 < _size2456; ++_i2460) + uint32_t _size2462; + ::apache::thrift::protocol::TType _etype2465; + xfer += iprot->readListBegin(_etype2465, _size2462); + (*(this->success)).resize(_size2462); + uint32_t _i2466; + for (_i2466 = 0; _i2466 < _size2462; ++_i2466) { - xfer += iprot->readString((*(this->success))[_i2460]); + xfer += iprot->readString((*(this->success))[_i2466]); } xfer += iprot->readListEnd(); } @@ -38185,14 +38185,14 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == 
::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2461; - ::apache::thrift::protocol::TType _etype2464; - xfer += iprot->readListBegin(_etype2464, _size2461); - this->success.resize(_size2461); - uint32_t _i2465; - for (_i2465 = 0; _i2465 < _size2461; ++_i2465) + uint32_t _size2467; + ::apache::thrift::protocol::TType _etype2470; + xfer += iprot->readListBegin(_etype2470, _size2467); + this->success.resize(_size2467); + uint32_t _i2471; + for (_i2471 = 0; _i2471 < _size2467; ++_i2471) { - xfer += iprot->readString(this->success[_i2465]); + xfer += iprot->readString(this->success[_i2471]); } xfer += iprot->readListEnd(); } @@ -38231,10 +38231,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter2466; - for (_iter2466 = this->success.begin(); _iter2466 != this->success.end(); ++_iter2466) + std::vector ::const_iterator _iter2472; + for (_iter2472 = this->success.begin(); _iter2472 != this->success.end(); ++_iter2472) { - xfer += oprot->writeString((*_iter2466)); + xfer += oprot->writeString((*_iter2472)); } xfer += oprot->writeListEnd(); } @@ -38279,14 +38279,14 @@ uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2467; - ::apache::thrift::protocol::TType _etype2470; - xfer += iprot->readListBegin(_etype2470, _size2467); - (*(this->success)).resize(_size2467); - uint32_t _i2471; - for (_i2471 = 0; _i2471 < _size2467; ++_i2471) + uint32_t _size2473; + ::apache::thrift::protocol::TType _etype2476; + xfer += iprot->readListBegin(_etype2476, _size2473); + (*(this->success)).resize(_size2473); + uint32_t _i2477; + for (_i2477 = 0; _i2477 < _size2473; ++_i2477) { - xfer += iprot->readString((*(this->success))[_i2471]); + xfer += iprot->readString((*(this->success))[_i2477]); } xfer += iprot->readListEnd(); } @@ -38359,9 +38359,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast2472; - xfer += iprot->readI32(ecast2472); - this->principal_type = static_cast(ecast2472); + int32_t ecast2478; + xfer += iprot->readI32(ecast2478); + this->principal_type = static_cast(ecast2478); this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -38377,9 +38377,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast2473; - xfer += iprot->readI32(ecast2473); - this->grantorType = static_cast(ecast2473); + int32_t ecast2479; + xfer += iprot->readI32(ecast2479); + this->grantorType = static_cast(ecast2479); this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -38650,9 +38650,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast2474; - xfer += iprot->readI32(ecast2474); - this->principal_type = static_cast(ecast2474); + int32_t ecast2480; + xfer += iprot->readI32(ecast2480); + this->principal_type = static_cast(ecast2480); this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -38883,9 +38883,9 @@ uint32_t 
ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast2475; - xfer += iprot->readI32(ecast2475); - this->principal_type = static_cast(ecast2475); + int32_t ecast2481; + xfer += iprot->readI32(ecast2481); + this->principal_type = static_cast(ecast2481); this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -38974,14 +38974,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2476; - ::apache::thrift::protocol::TType _etype2479; - xfer += iprot->readListBegin(_etype2479, _size2476); - this->success.resize(_size2476); - uint32_t _i2480; - for (_i2480 = 0; _i2480 < _size2476; ++_i2480) + uint32_t _size2482; + ::apache::thrift::protocol::TType _etype2485; + xfer += iprot->readListBegin(_etype2485, _size2482); + this->success.resize(_size2482); + uint32_t _i2486; + for (_i2486 = 0; _i2486 < _size2482; ++_i2486) { - xfer += this->success[_i2480].read(iprot); + xfer += this->success[_i2486].read(iprot); } xfer += iprot->readListEnd(); } @@ -39020,10 +39020,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter2481; - for (_iter2481 = this->success.begin(); _iter2481 != this->success.end(); ++_iter2481) + std::vector ::const_iterator _iter2487; + for (_iter2487 = this->success.begin(); _iter2487 != this->success.end(); ++_iter2487) { - xfer += (*_iter2481).write(oprot); + xfer += (*_iter2487).write(oprot); } xfer += oprot->writeListEnd(); } @@ -39068,14 +39068,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2482; - ::apache::thrift::protocol::TType _etype2485; - xfer += iprot->readListBegin(_etype2485, _size2482); - (*(this->success)).resize(_size2482); - uint32_t _i2486; - for (_i2486 = 0; _i2486 < _size2482; ++_i2486) + uint32_t _size2488; + ::apache::thrift::protocol::TType _etype2491; + xfer += iprot->readListBegin(_etype2491, _size2488); + (*(this->success)).resize(_size2488); + uint32_t _i2492; + for (_i2492 = 0; _i2492 < _size2488; ++_i2492) { - xfer += (*(this->success))[_i2486].read(iprot); + xfer += (*(this->success))[_i2492].read(iprot); } xfer += iprot->readListEnd(); } @@ -39771,14 +39771,14 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size2487; - ::apache::thrift::protocol::TType _etype2490; - xfer += iprot->readListBegin(_etype2490, _size2487); - this->group_names.resize(_size2487); - uint32_t _i2491; - for (_i2491 = 0; _i2491 < _size2487; ++_i2491) + uint32_t _size2493; + ::apache::thrift::protocol::TType _etype2496; + xfer += iprot->readListBegin(_etype2496, _size2493); + this->group_names.resize(_size2493); + uint32_t _i2497; + for (_i2497 = 0; _i2497 < _size2493; ++_i2497) { - xfer += iprot->readString(this->group_names[_i2491]); + xfer += iprot->readString(this->group_names[_i2497]); } xfer += iprot->readListEnd(); } @@ -39815,10 +39815,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro 
xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter2492; - for (_iter2492 = this->group_names.begin(); _iter2492 != this->group_names.end(); ++_iter2492) + std::vector ::const_iterator _iter2498; + for (_iter2498 = this->group_names.begin(); _iter2498 != this->group_names.end(); ++_iter2498) { - xfer += oprot->writeString((*_iter2492)); + xfer += oprot->writeString((*_iter2498)); } xfer += oprot->writeListEnd(); } @@ -39850,10 +39850,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter2493; - for (_iter2493 = (*(this->group_names)).begin(); _iter2493 != (*(this->group_names)).end(); ++_iter2493) + std::vector ::const_iterator _iter2499; + for (_iter2499 = (*(this->group_names)).begin(); _iter2499 != (*(this->group_names)).end(); ++_iter2499) { - xfer += oprot->writeString((*_iter2493)); + xfer += oprot->writeString((*_iter2499)); } xfer += oprot->writeListEnd(); } @@ -40028,9 +40028,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast2494; - xfer += iprot->readI32(ecast2494); - this->principal_type = static_cast(ecast2494); + int32_t ecast2500; + xfer += iprot->readI32(ecast2500); + this->principal_type = static_cast(ecast2500); this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -40135,14 +40135,14 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2495; - ::apache::thrift::protocol::TType _etype2498; - xfer += iprot->readListBegin(_etype2498, _size2495); - this->success.resize(_size2495); - uint32_t _i2499; - for (_i2499 = 0; _i2499 < _size2495; ++_i2499) + uint32_t _size2501; + ::apache::thrift::protocol::TType _etype2504; + xfer += iprot->readListBegin(_etype2504, _size2501); + this->success.resize(_size2501); + uint32_t _i2505; + for (_i2505 = 0; _i2505 < _size2501; ++_i2505) { - xfer += this->success[_i2499].read(iprot); + xfer += this->success[_i2505].read(iprot); } xfer += iprot->readListEnd(); } @@ -40181,10 +40181,10 @@ uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter2500; - for (_iter2500 = this->success.begin(); _iter2500 != this->success.end(); ++_iter2500) + std::vector ::const_iterator _iter2506; + for (_iter2506 = this->success.begin(); _iter2506 != this->success.end(); ++_iter2506) { - xfer += (*_iter2500).write(oprot); + xfer += (*_iter2506).write(oprot); } xfer += oprot->writeListEnd(); } @@ -40229,14 +40229,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2501; - ::apache::thrift::protocol::TType _etype2504; - xfer += iprot->readListBegin(_etype2504, _size2501); - 
(*(this->success)).resize(_size2501); - uint32_t _i2505; - for (_i2505 = 0; _i2505 < _size2501; ++_i2505) + uint32_t _size2507; + ::apache::thrift::protocol::TType _etype2510; + xfer += iprot->readListBegin(_etype2510, _size2507); + (*(this->success)).resize(_size2507); + uint32_t _i2511; + for (_i2511 = 0; _i2511 < _size2507; ++_i2511) { - xfer += (*(this->success))[_i2505].read(iprot); + xfer += (*(this->success))[_i2511].read(iprot); } xfer += iprot->readListEnd(); } @@ -41163,14 +41163,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size2506; - ::apache::thrift::protocol::TType _etype2509; - xfer += iprot->readListBegin(_etype2509, _size2506); - this->group_names.resize(_size2506); - uint32_t _i2510; - for (_i2510 = 0; _i2510 < _size2506; ++_i2510) + uint32_t _size2512; + ::apache::thrift::protocol::TType _etype2515; + xfer += iprot->readListBegin(_etype2515, _size2512); + this->group_names.resize(_size2512); + uint32_t _i2516; + for (_i2516 = 0; _i2516 < _size2512; ++_i2516) { - xfer += iprot->readString(this->group_names[_i2510]); + xfer += iprot->readString(this->group_names[_i2516]); } xfer += iprot->readListEnd(); } @@ -41203,10 +41203,10 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter2511; - for (_iter2511 = this->group_names.begin(); _iter2511 != this->group_names.end(); ++_iter2511) + std::vector ::const_iterator _iter2517; + for (_iter2517 = this->group_names.begin(); _iter2517 != this->group_names.end(); ++_iter2517) { - xfer += oprot->writeString((*_iter2511)); + xfer += oprot->writeString((*_iter2517)); } xfer += oprot->writeListEnd(); } @@ -41234,10 +41234,10 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter2512; - for (_iter2512 = (*(this->group_names)).begin(); _iter2512 != (*(this->group_names)).end(); ++_iter2512) + std::vector ::const_iterator _iter2518; + for (_iter2518 = (*(this->group_names)).begin(); _iter2518 != (*(this->group_names)).end(); ++_iter2518) { - xfer += oprot->writeString((*_iter2512)); + xfer += oprot->writeString((*_iter2518)); } xfer += oprot->writeListEnd(); } @@ -41278,14 +41278,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2513; - ::apache::thrift::protocol::TType _etype2516; - xfer += iprot->readListBegin(_etype2516, _size2513); - this->success.resize(_size2513); - uint32_t _i2517; - for (_i2517 = 0; _i2517 < _size2513; ++_i2517) + uint32_t _size2519; + ::apache::thrift::protocol::TType _etype2522; + xfer += iprot->readListBegin(_etype2522, _size2519); + this->success.resize(_size2519); + uint32_t _i2523; + for (_i2523 = 0; _i2523 < _size2519; ++_i2523) { - xfer += iprot->readString(this->success[_i2517]); + xfer += iprot->readString(this->success[_i2523]); } xfer += iprot->readListEnd(); } @@ -41324,10 +41324,10 @@ uint32_t 
ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter2518; - for (_iter2518 = this->success.begin(); _iter2518 != this->success.end(); ++_iter2518) + std::vector ::const_iterator _iter2524; + for (_iter2524 = this->success.begin(); _iter2524 != this->success.end(); ++_iter2524) { - xfer += oprot->writeString((*_iter2518)); + xfer += oprot->writeString((*_iter2524)); } xfer += oprot->writeListEnd(); } @@ -41372,14 +41372,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2519; - ::apache::thrift::protocol::TType _etype2522; - xfer += iprot->readListBegin(_etype2522, _size2519); - (*(this->success)).resize(_size2519); - uint32_t _i2523; - for (_i2523 = 0; _i2523 < _size2519; ++_i2523) + uint32_t _size2525; + ::apache::thrift::protocol::TType _etype2528; + xfer += iprot->readListBegin(_etype2528, _size2525); + (*(this->success)).resize(_size2525); + uint32_t _i2529; + for (_i2529 = 0; _i2529 < _size2525; ++_i2529) { - xfer += iprot->readString((*(this->success))[_i2523]); + xfer += iprot->readString((*(this->success))[_i2529]); } xfer += iprot->readListEnd(); } @@ -42690,14 +42690,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2524; - ::apache::thrift::protocol::TType _etype2527; - xfer += iprot->readListBegin(_etype2527, _size2524); - this->success.resize(_size2524); - uint32_t _i2528; - for (_i2528 = 0; _i2528 < _size2524; ++_i2528) + uint32_t _size2530; + ::apache::thrift::protocol::TType _etype2533; + xfer += iprot->readListBegin(_etype2533, _size2530); + this->success.resize(_size2530); + uint32_t _i2534; + for (_i2534 = 0; _i2534 < _size2530; ++_i2534) { - xfer += iprot->readString(this->success[_i2528]); + xfer += iprot->readString(this->success[_i2534]); } xfer += iprot->readListEnd(); } @@ -42728,10 +42728,10 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter2529; - for (_iter2529 = this->success.begin(); _iter2529 != this->success.end(); ++_iter2529) + std::vector ::const_iterator _iter2535; + for (_iter2535 = this->success.begin(); _iter2535 != this->success.end(); ++_iter2535) { - xfer += oprot->writeString((*_iter2529)); + xfer += oprot->writeString((*_iter2535)); } xfer += oprot->writeListEnd(); } @@ -42772,14 +42772,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2530; - ::apache::thrift::protocol::TType _etype2533; - xfer += iprot->readListBegin(_etype2533, _size2530); - (*(this->success)).resize(_size2530); - uint32_t _i2534; - for (_i2534 = 0; _i2534 < _size2530; ++_i2534) + uint32_t _size2536; + ::apache::thrift::protocol::TType _etype2539; + xfer += iprot->readListBegin(_etype2539, _size2536); + (*(this->success)).resize(_size2536); + uint32_t _i2540; + for (_i2540 = 0; _i2540 < 
_size2536; ++_i2540) { - xfer += iprot->readString((*(this->success))[_i2534]); + xfer += iprot->readString((*(this->success))[_i2540]); } xfer += iprot->readListEnd(); } @@ -43505,14 +43505,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2535; - ::apache::thrift::protocol::TType _etype2538; - xfer += iprot->readListBegin(_etype2538, _size2535); - this->success.resize(_size2535); - uint32_t _i2539; - for (_i2539 = 0; _i2539 < _size2535; ++_i2539) + uint32_t _size2541; + ::apache::thrift::protocol::TType _etype2544; + xfer += iprot->readListBegin(_etype2544, _size2541); + this->success.resize(_size2541); + uint32_t _i2545; + for (_i2545 = 0; _i2545 < _size2541; ++_i2545) { - xfer += iprot->readString(this->success[_i2539]); + xfer += iprot->readString(this->success[_i2545]); } xfer += iprot->readListEnd(); } @@ -43543,10 +43543,10 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter2540; - for (_iter2540 = this->success.begin(); _iter2540 != this->success.end(); ++_iter2540) + std::vector ::const_iterator _iter2546; + for (_iter2546 = this->success.begin(); _iter2546 != this->success.end(); ++_iter2546) { - xfer += oprot->writeString((*_iter2540)); + xfer += oprot->writeString((*_iter2546)); } xfer += oprot->writeListEnd(); } @@ -43587,14 +43587,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2541; - ::apache::thrift::protocol::TType _etype2544; - xfer += iprot->readListBegin(_etype2544, _size2541); - (*(this->success)).resize(_size2541); - uint32_t _i2545; - for (_i2545 = 0; _i2545 < _size2541; ++_i2545) + uint32_t _size2547; + ::apache::thrift::protocol::TType _etype2550; + xfer += iprot->readListBegin(_etype2550, _size2547); + (*(this->success)).resize(_size2547); + uint32_t _i2551; + for (_i2551 = 0; _i2551 < _size2547; ++_i2551) { - xfer += iprot->readString((*(this->success))[_i2545]); + xfer += iprot->readString((*(this->success))[_i2551]); } xfer += iprot->readListEnd(); } @@ -45343,17 +45343,17 @@ uint32_t ThriftHiveMetastore_add_write_ids_to_min_history_args::read(::apache::t if (ftype == ::apache::thrift::protocol::T_MAP) { { this->writeIds.clear(); - uint32_t _size2546; - ::apache::thrift::protocol::TType _ktype2547; - ::apache::thrift::protocol::TType _vtype2548; - xfer += iprot->readMapBegin(_ktype2547, _vtype2548, _size2546); - uint32_t _i2550; - for (_i2550 = 0; _i2550 < _size2546; ++_i2550) + uint32_t _size2552; + ::apache::thrift::protocol::TType _ktype2553; + ::apache::thrift::protocol::TType _vtype2554; + xfer += iprot->readMapBegin(_ktype2553, _vtype2554, _size2552); + uint32_t _i2556; + for (_i2556 = 0; _i2556 < _size2552; ++_i2556) { - std::string _key2551; - xfer += iprot->readString(_key2551); - int64_t& _val2552 = this->writeIds[_key2551]; - xfer += iprot->readI64(_val2552); + std::string _key2557; + xfer += iprot->readString(_key2557); + int64_t& _val2558 = this->writeIds[_key2557]; + xfer += iprot->readI64(_val2558); } xfer += iprot->readMapEnd(); } @@ -45386,11 +45386,11 @@ uint32_t 
ThriftHiveMetastore_add_write_ids_to_min_history_args::write(::apache:: xfer += oprot->writeFieldBegin("writeIds", ::apache::thrift::protocol::T_MAP, 2); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_I64, static_cast(this->writeIds.size())); - std::map ::const_iterator _iter2553; - for (_iter2553 = this->writeIds.begin(); _iter2553 != this->writeIds.end(); ++_iter2553) + std::map ::const_iterator _iter2559; + for (_iter2559 = this->writeIds.begin(); _iter2559 != this->writeIds.end(); ++_iter2559) { - xfer += oprot->writeString(_iter2553->first); - xfer += oprot->writeI64(_iter2553->second); + xfer += oprot->writeString(_iter2559->first); + xfer += oprot->writeI64(_iter2559->second); } xfer += oprot->writeMapEnd(); } @@ -45418,11 +45418,11 @@ uint32_t ThriftHiveMetastore_add_write_ids_to_min_history_pargs::write(::apache: xfer += oprot->writeFieldBegin("writeIds", ::apache::thrift::protocol::T_MAP, 2); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_I64, static_cast((*(this->writeIds)).size())); - std::map ::const_iterator _iter2554; - for (_iter2554 = (*(this->writeIds)).begin(); _iter2554 != (*(this->writeIds)).end(); ++_iter2554) + std::map ::const_iterator _iter2560; + for (_iter2560 = (*(this->writeIds)).begin(); _iter2560 != (*(this->writeIds)).end(); ++_iter2560) { - xfer += oprot->writeString(_iter2554->first); - xfer += oprot->writeI64(_iter2554->second); + xfer += oprot->writeString(_iter2560->first); + xfer += oprot->writeI64(_iter2560->second); } xfer += oprot->writeMapEnd(); } @@ -49322,14 +49322,14 @@ uint32_t ThriftHiveMetastore_find_columns_with_stats_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2555; - ::apache::thrift::protocol::TType _etype2558; - xfer += iprot->readListBegin(_etype2558, _size2555); - this->success.resize(_size2555); - uint32_t _i2559; - for (_i2559 = 0; _i2559 < _size2555; ++_i2559) + uint32_t _size2561; + ::apache::thrift::protocol::TType _etype2564; + xfer += iprot->readListBegin(_etype2564, _size2561); + this->success.resize(_size2561); + uint32_t _i2565; + for (_i2565 = 0; _i2565 < _size2561; ++_i2565) { - xfer += iprot->readString(this->success[_i2559]); + xfer += iprot->readString(this->success[_i2565]); } xfer += iprot->readListEnd(); } @@ -49360,10 +49360,10 @@ uint32_t ThriftHiveMetastore_find_columns_with_stats_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter2560; - for (_iter2560 = this->success.begin(); _iter2560 != this->success.end(); ++_iter2560) + std::vector ::const_iterator _iter2566; + for (_iter2566 = this->success.begin(); _iter2566 != this->success.end(); ++_iter2566) { - xfer += oprot->writeString((*_iter2560)); + xfer += oprot->writeString((*_iter2566)); } xfer += oprot->writeListEnd(); } @@ -49404,14 +49404,14 @@ uint32_t ThriftHiveMetastore_find_columns_with_stats_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2561; - ::apache::thrift::protocol::TType _etype2564; - xfer += iprot->readListBegin(_etype2564, _size2561); - (*(this->success)).resize(_size2561); - uint32_t _i2565; - for (_i2565 = 0; _i2565 < _size2561; ++_i2565) + uint32_t _size2567; + 
::apache::thrift::protocol::TType _etype2570; + xfer += iprot->readListBegin(_etype2570, _size2567); + (*(this->success)).resize(_size2567); + uint32_t _i2571; + for (_i2571 = 0; _i2571 < _size2567; ++_i2571) { - xfer += iprot->readString((*(this->success))[_i2565]); + xfer += iprot->readString((*(this->success))[_i2571]); } xfer += iprot->readListEnd(); } @@ -59334,14 +59334,14 @@ uint32_t ThriftHiveMetastore_get_schema_all_versions_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2566; - ::apache::thrift::protocol::TType _etype2569; - xfer += iprot->readListBegin(_etype2569, _size2566); - this->success.resize(_size2566); - uint32_t _i2570; - for (_i2570 = 0; _i2570 < _size2566; ++_i2570) + uint32_t _size2572; + ::apache::thrift::protocol::TType _etype2575; + xfer += iprot->readListBegin(_etype2575, _size2572); + this->success.resize(_size2572); + uint32_t _i2576; + for (_i2576 = 0; _i2576 < _size2572; ++_i2576) { - xfer += this->success[_i2570].read(iprot); + xfer += this->success[_i2576].read(iprot); } xfer += iprot->readListEnd(); } @@ -59388,10 +59388,10 @@ uint32_t ThriftHiveMetastore_get_schema_all_versions_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter2571; - for (_iter2571 = this->success.begin(); _iter2571 != this->success.end(); ++_iter2571) + std::vector ::const_iterator _iter2577; + for (_iter2577 = this->success.begin(); _iter2577 != this->success.end(); ++_iter2577) { - xfer += (*_iter2571).write(oprot); + xfer += (*_iter2577).write(oprot); } xfer += oprot->writeListEnd(); } @@ -59440,14 +59440,14 @@ uint32_t ThriftHiveMetastore_get_schema_all_versions_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2572; - ::apache::thrift::protocol::TType _etype2575; - xfer += iprot->readListBegin(_etype2575, _size2572); - (*(this->success)).resize(_size2572); - uint32_t _i2576; - for (_i2576 = 0; _i2576 < _size2572; ++_i2576) + uint32_t _size2578; + ::apache::thrift::protocol::TType _etype2581; + xfer += iprot->readListBegin(_etype2581, _size2578); + (*(this->success)).resize(_size2578); + uint32_t _i2582; + for (_i2582 = 0; _i2582 < _size2578; ++_i2582) { - xfer += (*(this->success))[_i2576].read(iprot); + xfer += (*(this->success))[_i2582].read(iprot); } xfer += iprot->readListEnd(); } @@ -61500,14 +61500,14 @@ uint32_t ThriftHiveMetastore_get_runtime_stats_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2577; - ::apache::thrift::protocol::TType _etype2580; - xfer += iprot->readListBegin(_etype2580, _size2577); - this->success.resize(_size2577); - uint32_t _i2581; - for (_i2581 = 0; _i2581 < _size2577; ++_i2581) + uint32_t _size2583; + ::apache::thrift::protocol::TType _etype2586; + xfer += iprot->readListBegin(_etype2586, _size2583); + this->success.resize(_size2583); + uint32_t _i2587; + for (_i2587 = 0; _i2587 < _size2583; ++_i2587) { - xfer += this->success[_i2581].read(iprot); + xfer += this->success[_i2587].read(iprot); } xfer += iprot->readListEnd(); } @@ -61546,10 +61546,10 @@ uint32_t ThriftHiveMetastore_get_runtime_stats_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter2582; - for (_iter2582 = this->success.begin(); _iter2582 != this->success.end(); ++_iter2582) + std::vector ::const_iterator _iter2588; + for (_iter2588 = this->success.begin(); _iter2588 != this->success.end(); ++_iter2588) { - xfer += (*_iter2582).write(oprot); + xfer += (*_iter2588).write(oprot); } xfer += oprot->writeListEnd(); } @@ -61594,14 +61594,14 @@ uint32_t ThriftHiveMetastore_get_runtime_stats_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2583; - ::apache::thrift::protocol::TType _etype2586; - xfer += iprot->readListBegin(_etype2586, _size2583); - (*(this->success)).resize(_size2583); - uint32_t _i2587; - for (_i2587 = 0; _i2587 < _size2583; ++_i2587) + uint32_t _size2589; + ::apache::thrift::protocol::TType _etype2592; + xfer += iprot->readListBegin(_etype2592, _size2589); + (*(this->success)).resize(_size2589); + uint32_t _i2593; + for (_i2593 = 0; _i2593 < _size2589; ++_i2593) { - xfer += (*(this->success))[_i2587].read(iprot); + xfer += (*(this->success))[_i2593].read(iprot); } xfer += iprot->readListEnd(); } @@ -64036,14 +64036,14 @@ uint32_t ThriftHiveMetastore_get_all_stored_procedures_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2588; - ::apache::thrift::protocol::TType _etype2591; - xfer += iprot->readListBegin(_etype2591, _size2588); - this->success.resize(_size2588); - uint32_t _i2592; - for (_i2592 = 0; _i2592 < _size2588; ++_i2592) + uint32_t _size2594; + ::apache::thrift::protocol::TType _etype2597; + xfer += iprot->readListBegin(_etype2597, _size2594); + this->success.resize(_size2594); + uint32_t _i2598; + for (_i2598 = 0; _i2598 < _size2594; ++_i2598) { - xfer += iprot->readString(this->success[_i2592]); + xfer += iprot->readString(this->success[_i2598]); } xfer += iprot->readListEnd(); } @@ -64082,10 +64082,10 @@ uint32_t ThriftHiveMetastore_get_all_stored_procedures_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter2593; - for (_iter2593 = this->success.begin(); _iter2593 != this->success.end(); ++_iter2593) + std::vector ::const_iterator _iter2599; + for (_iter2599 = this->success.begin(); _iter2599 != this->success.end(); ++_iter2599) { - xfer += oprot->writeString((*_iter2593)); + xfer += oprot->writeString((*_iter2599)); } xfer += oprot->writeListEnd(); } @@ -64130,14 +64130,14 @@ uint32_t ThriftHiveMetastore_get_all_stored_procedures_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2594; - ::apache::thrift::protocol::TType _etype2597; - xfer += iprot->readListBegin(_etype2597, _size2594); - (*(this->success)).resize(_size2594); - uint32_t _i2598; - for (_i2598 = 0; _i2598 < _size2594; ++_i2598) + uint32_t _size2600; + ::apache::thrift::protocol::TType _etype2603; + xfer += iprot->readListBegin(_etype2603, _size2600); + (*(this->success)).resize(_size2600); + uint32_t _i2604; + for (_i2604 = 0; _i2604 < _size2600; ++_i2604) { - xfer += iprot->readString((*(this->success))[_i2598]); + xfer += iprot->readString((*(this->success))[_i2604]); } xfer += iprot->readListEnd(); } @@ -64689,14 +64689,14 @@ 
uint32_t ThriftHiveMetastore_get_all_packages_result::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2599; - ::apache::thrift::protocol::TType _etype2602; - xfer += iprot->readListBegin(_etype2602, _size2599); - this->success.resize(_size2599); - uint32_t _i2603; - for (_i2603 = 0; _i2603 < _size2599; ++_i2603) + uint32_t _size2605; + ::apache::thrift::protocol::TType _etype2608; + xfer += iprot->readListBegin(_etype2608, _size2605); + this->success.resize(_size2605); + uint32_t _i2609; + for (_i2609 = 0; _i2609 < _size2605; ++_i2609) { - xfer += iprot->readString(this->success[_i2603]); + xfer += iprot->readString(this->success[_i2609]); } xfer += iprot->readListEnd(); } @@ -64735,10 +64735,10 @@ uint32_t ThriftHiveMetastore_get_all_packages_result::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter2604; - for (_iter2604 = this->success.begin(); _iter2604 != this->success.end(); ++_iter2604) + std::vector ::const_iterator _iter2610; + for (_iter2610 = this->success.begin(); _iter2610 != this->success.end(); ++_iter2610) { - xfer += oprot->writeString((*_iter2604)); + xfer += oprot->writeString((*_iter2610)); } xfer += oprot->writeListEnd(); } @@ -64783,14 +64783,14 @@ uint32_t ThriftHiveMetastore_get_all_packages_presult::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2605; - ::apache::thrift::protocol::TType _etype2608; - xfer += iprot->readListBegin(_etype2608, _size2605); - (*(this->success)).resize(_size2605); - uint32_t _i2609; - for (_i2609 = 0; _i2609 < _size2605; ++_i2609) + uint32_t _size2611; + ::apache::thrift::protocol::TType _etype2614; + xfer += iprot->readListBegin(_etype2614, _size2611); + (*(this->success)).resize(_size2611); + uint32_t _i2615; + for (_i2615 = 0; _i2615 < _size2611; ++_i2615) { - xfer += iprot->readString((*(this->success))[_i2609]); + xfer += iprot->readString((*(this->success))[_i2615]); } xfer += iprot->readListEnd(); } @@ -65115,14 +65115,14 @@ uint32_t ThriftHiveMetastore_get_all_write_event_info_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size2610; - ::apache::thrift::protocol::TType _etype2613; - xfer += iprot->readListBegin(_etype2613, _size2610); - this->success.resize(_size2610); - uint32_t _i2614; - for (_i2614 = 0; _i2614 < _size2610; ++_i2614) + uint32_t _size2616; + ::apache::thrift::protocol::TType _etype2619; + xfer += iprot->readListBegin(_etype2619, _size2616); + this->success.resize(_size2616); + uint32_t _i2620; + for (_i2620 = 0; _i2620 < _size2616; ++_i2620) { - xfer += this->success[_i2614].read(iprot); + xfer += this->success[_i2620].read(iprot); } xfer += iprot->readListEnd(); } @@ -65161,10 +65161,10 @@ uint32_t ThriftHiveMetastore_get_all_write_event_info_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter2615; - for (_iter2615 = this->success.begin(); _iter2615 != this->success.end(); ++_iter2615) + std::vector ::const_iterator _iter2621; + for (_iter2621 = this->success.begin(); _iter2621 != this->success.end(); 
++_iter2621) { - xfer += (*_iter2615).write(oprot); + xfer += (*_iter2621).write(oprot); } xfer += oprot->writeListEnd(); } @@ -65209,14 +65209,14 @@ uint32_t ThriftHiveMetastore_get_all_write_event_info_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size2616; - ::apache::thrift::protocol::TType _etype2619; - xfer += iprot->readListBegin(_etype2619, _size2616); - (*(this->success)).resize(_size2616); - uint32_t _i2620; - for (_i2620 = 0; _i2620 < _size2616; ++_i2620) + uint32_t _size2622; + ::apache::thrift::protocol::TType _etype2625; + xfer += iprot->readListBegin(_etype2625, _size2622); + (*(this->success)).resize(_size2622); + uint32_t _i2626; + for (_i2626 = 0; _i2626 < _size2622; ++_i2626) { - xfer += (*(this->success))[_i2620].read(iprot); + xfer += (*(this->success))[_i2626].read(iprot); } xfer += iprot->readListEnd(); } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index 401d63df93fe..1ec6bcee3f19 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -13933,6 +13933,7 @@ __isset.validWriteIdList = true; void SetPartitionsStatsRequest::__set_engine(const std::string& val) { this->engine = val; +__isset.engine = true; } std::ostream& operator<<(std::ostream& out, const SetPartitionsStatsRequest& obj) { @@ -13954,7 +13955,6 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* using ::apache::thrift::protocol::TProtocolException; bool isset_colStats = false; - bool isset_engine = false; while (true) { @@ -14011,7 +14011,7 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* case 5: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->engine); - isset_engine = true; + this->__isset.engine = true; } else { xfer += iprot->skip(ftype); } @@ -14027,8 +14027,6 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* if (!isset_colStats) throw TProtocolException(TProtocolException::INVALID_DATA); - if (!isset_engine) - throw TProtocolException(TProtocolException::INVALID_DATA); return xfer; } @@ -14064,10 +14062,11 @@ uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeString(this->validWriteIdList); xfer += oprot->writeFieldEnd(); } - xfer += oprot->writeFieldBegin("engine", ::apache::thrift::protocol::T_STRING, 5); - xfer += oprot->writeString(this->engine); - xfer += oprot->writeFieldEnd(); - + if (this->__isset.engine) { + xfer += oprot->writeFieldBegin("engine", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->engine); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -14107,7 +14106,7 @@ void SetPartitionsStatsRequest::printTo(std::ostream& out) const { out << ", " << "needMerge="; (__isset.needMerge ? (out << to_string(needMerge)) : (out << "")); out << ", " << "writeId="; (__isset.writeId ? (out << to_string(writeId)) : (out << "")); out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); - out << ", " << "engine=" << to_string(engine); + out << ", " << "engine="; (__isset.engine ? 
(out << to_string(engine)) : (out << "")); out << ")"; } @@ -18221,6 +18220,7 @@ __isset.validWriteIdList = true; void TableStatsRequest::__set_engine(const std::string& val) { this->engine = val; +__isset.engine = true; } void TableStatsRequest::__set_id(const int64_t val) { @@ -18249,7 +18249,6 @@ uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { bool isset_dbName = false; bool isset_tblName = false; bool isset_colNames = false; - bool isset_engine = false; while (true) { @@ -18314,7 +18313,7 @@ uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { case 6: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->engine); - isset_engine = true; + this->__isset.engine = true; } else { xfer += iprot->skip(ftype); } @@ -18342,8 +18341,6 @@ uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_colNames) throw TProtocolException(TProtocolException::INVALID_DATA); - if (!isset_engine) - throw TProtocolException(TProtocolException::INVALID_DATA); return xfer; } @@ -18382,10 +18379,11 @@ uint32_t TableStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeString(this->validWriteIdList); xfer += oprot->writeFieldEnd(); } - xfer += oprot->writeFieldBegin("engine", ::apache::thrift::protocol::T_STRING, 6); - xfer += oprot->writeString(this->engine); - xfer += oprot->writeFieldEnd(); - + if (this->__isset.engine) { + xfer += oprot->writeFieldBegin("engine", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->engine); + xfer += oprot->writeFieldEnd(); + } if (this->__isset.id) { xfer += oprot->writeFieldBegin("id", ::apache::thrift::protocol::T_I64, 7); xfer += oprot->writeI64(this->id); @@ -18437,7 +18435,7 @@ void TableStatsRequest::printTo(std::ostream& out) const { out << ", " << "colNames=" << to_string(colNames); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); - out << ", " << "engine=" << to_string(engine); + out << ", " << "engine="; (__isset.engine ? (out << to_string(engine)) : (out << "")); out << ", " << "id="; (__isset.id ? 
(out << to_string(id)) : (out << "")); out << ")"; } @@ -18475,6 +18473,7 @@ __isset.validWriteIdList = true; void PartitionsStatsRequest::__set_engine(const std::string& val) { this->engine = val; +__isset.engine = true; } std::ostream& operator<<(std::ostream& out, const PartitionsStatsRequest& obj) { @@ -18499,7 +18498,6 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr bool isset_tblName = false; bool isset_colNames = false; bool isset_partNames = false; - bool isset_engine = false; while (true) { @@ -18584,7 +18582,7 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr case 7: if (ftype == ::apache::thrift::protocol::T_STRING) { xfer += iprot->readString(this->engine); - isset_engine = true; + this->__isset.engine = true; } else { xfer += iprot->skip(ftype); } @@ -18606,8 +18604,6 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr throw TProtocolException(TProtocolException::INVALID_DATA); if (!isset_partNames) throw TProtocolException(TProtocolException::INVALID_DATA); - if (!isset_engine) - throw TProtocolException(TProtocolException::INVALID_DATA); return xfer; } @@ -18658,10 +18654,11 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeString(this->validWriteIdList); xfer += oprot->writeFieldEnd(); } - xfer += oprot->writeFieldBegin("engine", ::apache::thrift::protocol::T_STRING, 7); - xfer += oprot->writeString(this->engine); - xfer += oprot->writeFieldEnd(); - + if (this->__isset.engine) { + xfer += oprot->writeFieldBegin("engine", ::apache::thrift::protocol::T_STRING, 7); + xfer += oprot->writeString(this->engine); + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -18709,7 +18706,7 @@ void PartitionsStatsRequest::printTo(std::ostream& out) const { out << ", " << "partNames=" << to_string(partNames); out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "")); out << ", " << "validWriteIdList="; (__isset.validWriteIdList ? (out << to_string(validWriteIdList)) : (out << "")); - out << ", " << "engine=" << to_string(engine); + out << ", " << "engine="; (__isset.engine ? 
(out << to_string(engine)) : (out << "")); out << ")"; } @@ -50167,6 +50164,11 @@ void GetPartitionsPsWithAuthRequest::__set_excludeParamKeyPattern(const std::str this->excludeParamKeyPattern = val; __isset.excludeParamKeyPattern = true; } + +void GetPartitionsPsWithAuthRequest::__set_partNames(const std::vector & val) { + this->partNames = val; +__isset.partNames = true; +} std::ostream& operator<<(std::ostream& out, const GetPartitionsPsWithAuthRequest& obj) { obj.printTo(out); @@ -50317,6 +50319,26 @@ uint32_t GetPartitionsPsWithAuthRequest::read(::apache::thrift::protocol::TProto xfer += iprot->skip(ftype); } break; + case 13: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->partNames.clear(); + uint32_t _size1771; + ::apache::thrift::protocol::TType _etype1774; + xfer += iprot->readListBegin(_etype1774, _size1771); + this->partNames.resize(_size1771); + uint32_t _i1775; + for (_i1775 = 0; _i1775 < _size1771; ++_i1775) + { + xfer += iprot->readString(this->partNames[_i1775]); + } + xfer += iprot->readListEnd(); + } + this->__isset.partNames = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -50355,10 +50377,10 @@ uint32_t GetPartitionsPsWithAuthRequest::write(::apache::thrift::protocol::TProt xfer += oprot->writeFieldBegin("partVals", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partVals.size())); - std::vector ::const_iterator _iter1771; - for (_iter1771 = this->partVals.begin(); _iter1771 != this->partVals.end(); ++_iter1771) + std::vector ::const_iterator _iter1776; + for (_iter1776 = this->partVals.begin(); _iter1776 != this->partVals.end(); ++_iter1776) { - xfer += oprot->writeString((*_iter1771)); + xfer += oprot->writeString((*_iter1776)); } xfer += oprot->writeListEnd(); } @@ -50378,10 +50400,10 @@ uint32_t GetPartitionsPsWithAuthRequest::write(::apache::thrift::protocol::TProt xfer += oprot->writeFieldBegin("groupNames", ::apache::thrift::protocol::T_LIST, 7); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->groupNames.size())); - std::vector ::const_iterator _iter1772; - for (_iter1772 = this->groupNames.begin(); _iter1772 != this->groupNames.end(); ++_iter1772) + std::vector ::const_iterator _iter1777; + for (_iter1777 = this->groupNames.begin(); _iter1777 != this->groupNames.end(); ++_iter1777) { - xfer += oprot->writeString((*_iter1772)); + xfer += oprot->writeString((*_iter1777)); } xfer += oprot->writeListEnd(); } @@ -50412,6 +50434,19 @@ uint32_t GetPartitionsPsWithAuthRequest::write(::apache::thrift::protocol::TProt xfer += oprot->writeString(this->excludeParamKeyPattern); xfer += oprot->writeFieldEnd(); } + if (this->__isset.partNames) { + xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 13); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); + std::vector ::const_iterator _iter1778; + for (_iter1778 = this->partNames.begin(); _iter1778 != this->partNames.end(); ++_iter1778) + { + xfer += oprot->writeString((*_iter1778)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -50431,38 +50466,41 @@ void swap(GetPartitionsPsWithAuthRequest &a, GetPartitionsPsWithAuthRequest &b) swap(a.skipColumnSchemaForPartition, b.skipColumnSchemaForPartition); swap(a.includeParamKeyPattern, 
b.includeParamKeyPattern); swap(a.excludeParamKeyPattern, b.excludeParamKeyPattern); + swap(a.partNames, b.partNames); swap(a.__isset, b.__isset); } -GetPartitionsPsWithAuthRequest::GetPartitionsPsWithAuthRequest(const GetPartitionsPsWithAuthRequest& other1773) { - catName = other1773.catName; - dbName = other1773.dbName; - tblName = other1773.tblName; - partVals = other1773.partVals; - maxParts = other1773.maxParts; - userName = other1773.userName; - groupNames = other1773.groupNames; - validWriteIdList = other1773.validWriteIdList; - id = other1773.id; - skipColumnSchemaForPartition = other1773.skipColumnSchemaForPartition; - includeParamKeyPattern = other1773.includeParamKeyPattern; - excludeParamKeyPattern = other1773.excludeParamKeyPattern; - __isset = other1773.__isset; -} -GetPartitionsPsWithAuthRequest& GetPartitionsPsWithAuthRequest::operator=(const GetPartitionsPsWithAuthRequest& other1774) { - catName = other1774.catName; - dbName = other1774.dbName; - tblName = other1774.tblName; - partVals = other1774.partVals; - maxParts = other1774.maxParts; - userName = other1774.userName; - groupNames = other1774.groupNames; - validWriteIdList = other1774.validWriteIdList; - id = other1774.id; - skipColumnSchemaForPartition = other1774.skipColumnSchemaForPartition; - includeParamKeyPattern = other1774.includeParamKeyPattern; - excludeParamKeyPattern = other1774.excludeParamKeyPattern; - __isset = other1774.__isset; +GetPartitionsPsWithAuthRequest::GetPartitionsPsWithAuthRequest(const GetPartitionsPsWithAuthRequest& other1779) { + catName = other1779.catName; + dbName = other1779.dbName; + tblName = other1779.tblName; + partVals = other1779.partVals; + maxParts = other1779.maxParts; + userName = other1779.userName; + groupNames = other1779.groupNames; + validWriteIdList = other1779.validWriteIdList; + id = other1779.id; + skipColumnSchemaForPartition = other1779.skipColumnSchemaForPartition; + includeParamKeyPattern = other1779.includeParamKeyPattern; + excludeParamKeyPattern = other1779.excludeParamKeyPattern; + partNames = other1779.partNames; + __isset = other1779.__isset; +} +GetPartitionsPsWithAuthRequest& GetPartitionsPsWithAuthRequest::operator=(const GetPartitionsPsWithAuthRequest& other1780) { + catName = other1780.catName; + dbName = other1780.dbName; + tblName = other1780.tblName; + partVals = other1780.partVals; + maxParts = other1780.maxParts; + userName = other1780.userName; + groupNames = other1780.groupNames; + validWriteIdList = other1780.validWriteIdList; + id = other1780.id; + skipColumnSchemaForPartition = other1780.skipColumnSchemaForPartition; + includeParamKeyPattern = other1780.includeParamKeyPattern; + excludeParamKeyPattern = other1780.excludeParamKeyPattern; + partNames = other1780.partNames; + __isset = other1780.__isset; return *this; } void GetPartitionsPsWithAuthRequest::printTo(std::ostream& out) const { @@ -50480,6 +50518,7 @@ void GetPartitionsPsWithAuthRequest::printTo(std::ostream& out) const { out << ", " << "skipColumnSchemaForPartition="; (__isset.skipColumnSchemaForPartition ? (out << to_string(skipColumnSchemaForPartition)) : (out << "")); out << ", " << "includeParamKeyPattern="; (__isset.includeParamKeyPattern ? (out << to_string(includeParamKeyPattern)) : (out << "")); out << ", " << "excludeParamKeyPattern="; (__isset.excludeParamKeyPattern ? (out << to_string(excludeParamKeyPattern)) : (out << "")); + out << ", " << "partNames="; (__isset.partNames ? 
(out << to_string(partNames)) : (out << "")); out << ")"; } @@ -50524,14 +50563,14 @@ uint32_t GetPartitionsPsWithAuthResponse::read(::apache::thrift::protocol::TProt if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size1775; - ::apache::thrift::protocol::TType _etype1778; - xfer += iprot->readListBegin(_etype1778, _size1775); - this->partitions.resize(_size1775); - uint32_t _i1779; - for (_i1779 = 0; _i1779 < _size1775; ++_i1779) + uint32_t _size1781; + ::apache::thrift::protocol::TType _etype1784; + xfer += iprot->readListBegin(_etype1784, _size1781); + this->partitions.resize(_size1781); + uint32_t _i1785; + for (_i1785 = 0; _i1785 < _size1781; ++_i1785) { - xfer += this->partitions[_i1779].read(iprot); + xfer += this->partitions[_i1785].read(iprot); } xfer += iprot->readListEnd(); } @@ -50562,10 +50601,10 @@ uint32_t GetPartitionsPsWithAuthResponse::write(::apache::thrift::protocol::TPro xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter1780; - for (_iter1780 = this->partitions.begin(); _iter1780 != this->partitions.end(); ++_iter1780) + std::vector ::const_iterator _iter1786; + for (_iter1786 = this->partitions.begin(); _iter1786 != this->partitions.end(); ++_iter1786) { - xfer += (*_iter1780).write(oprot); + xfer += (*_iter1786).write(oprot); } xfer += oprot->writeListEnd(); } @@ -50581,11 +50620,11 @@ void swap(GetPartitionsPsWithAuthResponse &a, GetPartitionsPsWithAuthResponse &b swap(a.partitions, b.partitions); } -GetPartitionsPsWithAuthResponse::GetPartitionsPsWithAuthResponse(const GetPartitionsPsWithAuthResponse& other1781) { - partitions = other1781.partitions; +GetPartitionsPsWithAuthResponse::GetPartitionsPsWithAuthResponse(const GetPartitionsPsWithAuthResponse& other1787) { + partitions = other1787.partitions; } -GetPartitionsPsWithAuthResponse& GetPartitionsPsWithAuthResponse::operator=(const GetPartitionsPsWithAuthResponse& other1782) { - partitions = other1782.partitions; +GetPartitionsPsWithAuthResponse& GetPartitionsPsWithAuthResponse::operator=(const GetPartitionsPsWithAuthResponse& other1788) { + partitions = other1788.partitions; return *this; } void GetPartitionsPsWithAuthResponse::printTo(std::ostream& out) const { @@ -50771,23 +50810,23 @@ void swap(ReplicationMetrics &a, ReplicationMetrics &b) { swap(a.__isset, b.__isset); } -ReplicationMetrics::ReplicationMetrics(const ReplicationMetrics& other1783) { - scheduledExecutionId = other1783.scheduledExecutionId; - policy = other1783.policy; - dumpExecutionId = other1783.dumpExecutionId; - metadata = other1783.metadata; - progress = other1783.progress; - messageFormat = other1783.messageFormat; - __isset = other1783.__isset; +ReplicationMetrics::ReplicationMetrics(const ReplicationMetrics& other1789) { + scheduledExecutionId = other1789.scheduledExecutionId; + policy = other1789.policy; + dumpExecutionId = other1789.dumpExecutionId; + metadata = other1789.metadata; + progress = other1789.progress; + messageFormat = other1789.messageFormat; + __isset = other1789.__isset; } -ReplicationMetrics& ReplicationMetrics::operator=(const ReplicationMetrics& other1784) { - scheduledExecutionId = other1784.scheduledExecutionId; - policy = other1784.policy; - dumpExecutionId = other1784.dumpExecutionId; - metadata = other1784.metadata; - progress = other1784.progress; - messageFormat = other1784.messageFormat; 
- __isset = other1784.__isset; +ReplicationMetrics& ReplicationMetrics::operator=(const ReplicationMetrics& other1790) { + scheduledExecutionId = other1790.scheduledExecutionId; + policy = other1790.policy; + dumpExecutionId = other1790.dumpExecutionId; + metadata = other1790.metadata; + progress = other1790.progress; + messageFormat = other1790.messageFormat; + __isset = other1790.__isset; return *this; } void ReplicationMetrics::printTo(std::ostream& out) const { @@ -50843,14 +50882,14 @@ uint32_t ReplicationMetricList::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->replicationMetricList.clear(); - uint32_t _size1785; - ::apache::thrift::protocol::TType _etype1788; - xfer += iprot->readListBegin(_etype1788, _size1785); - this->replicationMetricList.resize(_size1785); - uint32_t _i1789; - for (_i1789 = 0; _i1789 < _size1785; ++_i1789) + uint32_t _size1791; + ::apache::thrift::protocol::TType _etype1794; + xfer += iprot->readListBegin(_etype1794, _size1791); + this->replicationMetricList.resize(_size1791); + uint32_t _i1795; + for (_i1795 = 0; _i1795 < _size1791; ++_i1795) { - xfer += this->replicationMetricList[_i1789].read(iprot); + xfer += this->replicationMetricList[_i1795].read(iprot); } xfer += iprot->readListEnd(); } @@ -50881,10 +50920,10 @@ uint32_t ReplicationMetricList::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("replicationMetricList", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->replicationMetricList.size())); - std::vector ::const_iterator _iter1790; - for (_iter1790 = this->replicationMetricList.begin(); _iter1790 != this->replicationMetricList.end(); ++_iter1790) + std::vector ::const_iterator _iter1796; + for (_iter1796 = this->replicationMetricList.begin(); _iter1796 != this->replicationMetricList.end(); ++_iter1796) { - xfer += (*_iter1790).write(oprot); + xfer += (*_iter1796).write(oprot); } xfer += oprot->writeListEnd(); } @@ -50900,11 +50939,11 @@ void swap(ReplicationMetricList &a, ReplicationMetricList &b) { swap(a.replicationMetricList, b.replicationMetricList); } -ReplicationMetricList::ReplicationMetricList(const ReplicationMetricList& other1791) { - replicationMetricList = other1791.replicationMetricList; +ReplicationMetricList::ReplicationMetricList(const ReplicationMetricList& other1797) { + replicationMetricList = other1797.replicationMetricList; } -ReplicationMetricList& ReplicationMetricList::operator=(const ReplicationMetricList& other1792) { - replicationMetricList = other1792.replicationMetricList; +ReplicationMetricList& ReplicationMetricList::operator=(const ReplicationMetricList& other1798) { + replicationMetricList = other1798.replicationMetricList; return *this; } void ReplicationMetricList::printTo(std::ostream& out) const { @@ -51030,17 +51069,17 @@ void swap(GetReplicationMetricsRequest &a, GetReplicationMetricsRequest &b) { swap(a.__isset, b.__isset); } -GetReplicationMetricsRequest::GetReplicationMetricsRequest(const GetReplicationMetricsRequest& other1793) { - scheduledExecutionId = other1793.scheduledExecutionId; - policy = other1793.policy; - dumpExecutionId = other1793.dumpExecutionId; - __isset = other1793.__isset; +GetReplicationMetricsRequest::GetReplicationMetricsRequest(const GetReplicationMetricsRequest& other1799) { + scheduledExecutionId = other1799.scheduledExecutionId; + policy = other1799.policy; + dumpExecutionId = other1799.dumpExecutionId; + __isset 
= other1799.__isset; } -GetReplicationMetricsRequest& GetReplicationMetricsRequest::operator=(const GetReplicationMetricsRequest& other1794) { - scheduledExecutionId = other1794.scheduledExecutionId; - policy = other1794.policy; - dumpExecutionId = other1794.dumpExecutionId; - __isset = other1794.__isset; +GetReplicationMetricsRequest& GetReplicationMetricsRequest::operator=(const GetReplicationMetricsRequest& other1800) { + scheduledExecutionId = other1800.scheduledExecutionId; + policy = other1800.policy; + dumpExecutionId = other1800.dumpExecutionId; + __isset = other1800.__isset; return *this; } void GetReplicationMetricsRequest::printTo(std::ostream& out) const { @@ -51093,16 +51132,16 @@ uint32_t GetOpenTxnsRequest::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->excludeTxnTypes.clear(); - uint32_t _size1795; - ::apache::thrift::protocol::TType _etype1798; - xfer += iprot->readListBegin(_etype1798, _size1795); - this->excludeTxnTypes.resize(_size1795); - uint32_t _i1799; - for (_i1799 = 0; _i1799 < _size1795; ++_i1799) + uint32_t _size1801; + ::apache::thrift::protocol::TType _etype1804; + xfer += iprot->readListBegin(_etype1804, _size1801); + this->excludeTxnTypes.resize(_size1801); + uint32_t _i1805; + for (_i1805 = 0; _i1805 < _size1801; ++_i1805) { - int32_t ecast1800; - xfer += iprot->readI32(ecast1800); - this->excludeTxnTypes[_i1799] = static_cast(ecast1800); + int32_t ecast1806; + xfer += iprot->readI32(ecast1806); + this->excludeTxnTypes[_i1805] = static_cast(ecast1806); } xfer += iprot->readListEnd(); } @@ -51132,10 +51171,10 @@ uint32_t GetOpenTxnsRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("excludeTxnTypes", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I32, static_cast(this->excludeTxnTypes.size())); - std::vector ::const_iterator _iter1801; - for (_iter1801 = this->excludeTxnTypes.begin(); _iter1801 != this->excludeTxnTypes.end(); ++_iter1801) + std::vector ::const_iterator _iter1807; + for (_iter1807 = this->excludeTxnTypes.begin(); _iter1807 != this->excludeTxnTypes.end(); ++_iter1807) { - xfer += oprot->writeI32(static_cast((*_iter1801))); + xfer += oprot->writeI32(static_cast((*_iter1807))); } xfer += oprot->writeListEnd(); } @@ -51152,13 +51191,13 @@ void swap(GetOpenTxnsRequest &a, GetOpenTxnsRequest &b) { swap(a.__isset, b.__isset); } -GetOpenTxnsRequest::GetOpenTxnsRequest(const GetOpenTxnsRequest& other1802) { - excludeTxnTypes = other1802.excludeTxnTypes; - __isset = other1802.__isset; +GetOpenTxnsRequest::GetOpenTxnsRequest(const GetOpenTxnsRequest& other1808) { + excludeTxnTypes = other1808.excludeTxnTypes; + __isset = other1808.__isset; } -GetOpenTxnsRequest& GetOpenTxnsRequest::operator=(const GetOpenTxnsRequest& other1803) { - excludeTxnTypes = other1803.excludeTxnTypes; - __isset = other1803.__isset; +GetOpenTxnsRequest& GetOpenTxnsRequest::operator=(const GetOpenTxnsRequest& other1809) { + excludeTxnTypes = other1809.excludeTxnTypes; + __isset = other1809.__isset; return *this; } void GetOpenTxnsRequest::printTo(std::ostream& out) const { @@ -51286,15 +51325,15 @@ void swap(StoredProcedureRequest &a, StoredProcedureRequest &b) { swap(a.procName, b.procName); } -StoredProcedureRequest::StoredProcedureRequest(const StoredProcedureRequest& other1804) { - catName = other1804.catName; - dbName = other1804.dbName; - procName = other1804.procName; 
+StoredProcedureRequest::StoredProcedureRequest(const StoredProcedureRequest& other1810) { + catName = other1810.catName; + dbName = other1810.dbName; + procName = other1810.procName; } -StoredProcedureRequest& StoredProcedureRequest::operator=(const StoredProcedureRequest& other1805) { - catName = other1805.catName; - dbName = other1805.dbName; - procName = other1805.procName; +StoredProcedureRequest& StoredProcedureRequest::operator=(const StoredProcedureRequest& other1811) { + catName = other1811.catName; + dbName = other1811.dbName; + procName = other1811.procName; return *this; } void StoredProcedureRequest::printTo(std::ostream& out) const { @@ -51404,15 +51443,15 @@ void swap(ListStoredProcedureRequest &a, ListStoredProcedureRequest &b) { swap(a.__isset, b.__isset); } -ListStoredProcedureRequest::ListStoredProcedureRequest(const ListStoredProcedureRequest& other1806) { - catName = other1806.catName; - dbName = other1806.dbName; - __isset = other1806.__isset; +ListStoredProcedureRequest::ListStoredProcedureRequest(const ListStoredProcedureRequest& other1812) { + catName = other1812.catName; + dbName = other1812.dbName; + __isset = other1812.__isset; } -ListStoredProcedureRequest& ListStoredProcedureRequest::operator=(const ListStoredProcedureRequest& other1807) { - catName = other1807.catName; - dbName = other1807.dbName; - __isset = other1807.__isset; +ListStoredProcedureRequest& ListStoredProcedureRequest::operator=(const ListStoredProcedureRequest& other1813) { + catName = other1813.catName; + dbName = other1813.dbName; + __isset = other1813.__isset; return *this; } void ListStoredProcedureRequest::printTo(std::ostream& out) const { @@ -51567,21 +51606,21 @@ void swap(StoredProcedure &a, StoredProcedure &b) { swap(a.__isset, b.__isset); } -StoredProcedure::StoredProcedure(const StoredProcedure& other1808) { - name = other1808.name; - dbName = other1808.dbName; - catName = other1808.catName; - ownerName = other1808.ownerName; - source = other1808.source; - __isset = other1808.__isset; +StoredProcedure::StoredProcedure(const StoredProcedure& other1814) { + name = other1814.name; + dbName = other1814.dbName; + catName = other1814.catName; + ownerName = other1814.ownerName; + source = other1814.source; + __isset = other1814.__isset; } -StoredProcedure& StoredProcedure::operator=(const StoredProcedure& other1809) { - name = other1809.name; - dbName = other1809.dbName; - catName = other1809.catName; - ownerName = other1809.ownerName; - source = other1809.source; - __isset = other1809.__isset; +StoredProcedure& StoredProcedure::operator=(const StoredProcedure& other1815) { + name = other1815.name; + dbName = other1815.dbName; + catName = other1815.catName; + ownerName = other1815.ownerName; + source = other1815.source; + __isset = other1815.__isset; return *this; } void StoredProcedure::printTo(std::ostream& out) const { @@ -51756,23 +51795,23 @@ void swap(AddPackageRequest &a, AddPackageRequest &b) { swap(a.__isset, b.__isset); } -AddPackageRequest::AddPackageRequest(const AddPackageRequest& other1810) { - catName = other1810.catName; - dbName = other1810.dbName; - packageName = other1810.packageName; - ownerName = other1810.ownerName; - header = other1810.header; - body = other1810.body; - __isset = other1810.__isset; +AddPackageRequest::AddPackageRequest(const AddPackageRequest& other1816) { + catName = other1816.catName; + dbName = other1816.dbName; + packageName = other1816.packageName; + ownerName = other1816.ownerName; + header = other1816.header; + body = other1816.body; + 
__isset = other1816.__isset; } -AddPackageRequest& AddPackageRequest::operator=(const AddPackageRequest& other1811) { - catName = other1811.catName; - dbName = other1811.dbName; - packageName = other1811.packageName; - ownerName = other1811.ownerName; - header = other1811.header; - body = other1811.body; - __isset = other1811.__isset; +AddPackageRequest& AddPackageRequest::operator=(const AddPackageRequest& other1817) { + catName = other1817.catName; + dbName = other1817.dbName; + packageName = other1817.packageName; + ownerName = other1817.ownerName; + header = other1817.header; + body = other1817.body; + __isset = other1817.__isset; return *this; } void AddPackageRequest::printTo(std::ostream& out) const { @@ -51905,15 +51944,15 @@ void swap(GetPackageRequest &a, GetPackageRequest &b) { swap(a.packageName, b.packageName); } -GetPackageRequest::GetPackageRequest(const GetPackageRequest& other1812) { - catName = other1812.catName; - dbName = other1812.dbName; - packageName = other1812.packageName; +GetPackageRequest::GetPackageRequest(const GetPackageRequest& other1818) { + catName = other1818.catName; + dbName = other1818.dbName; + packageName = other1818.packageName; } -GetPackageRequest& GetPackageRequest::operator=(const GetPackageRequest& other1813) { - catName = other1813.catName; - dbName = other1813.dbName; - packageName = other1813.packageName; +GetPackageRequest& GetPackageRequest::operator=(const GetPackageRequest& other1819) { + catName = other1819.catName; + dbName = other1819.dbName; + packageName = other1819.packageName; return *this; } void GetPackageRequest::printTo(std::ostream& out) const { @@ -52043,15 +52082,15 @@ void swap(DropPackageRequest &a, DropPackageRequest &b) { swap(a.packageName, b.packageName); } -DropPackageRequest::DropPackageRequest(const DropPackageRequest& other1814) { - catName = other1814.catName; - dbName = other1814.dbName; - packageName = other1814.packageName; +DropPackageRequest::DropPackageRequest(const DropPackageRequest& other1820) { + catName = other1820.catName; + dbName = other1820.dbName; + packageName = other1820.packageName; } -DropPackageRequest& DropPackageRequest::operator=(const DropPackageRequest& other1815) { - catName = other1815.catName; - dbName = other1815.dbName; - packageName = other1815.packageName; +DropPackageRequest& DropPackageRequest::operator=(const DropPackageRequest& other1821) { + catName = other1821.catName; + dbName = other1821.dbName; + packageName = other1821.packageName; return *this; } void DropPackageRequest::printTo(std::ostream& out) const { @@ -52161,15 +52200,15 @@ void swap(ListPackageRequest &a, ListPackageRequest &b) { swap(a.__isset, b.__isset); } -ListPackageRequest::ListPackageRequest(const ListPackageRequest& other1816) { - catName = other1816.catName; - dbName = other1816.dbName; - __isset = other1816.__isset; +ListPackageRequest::ListPackageRequest(const ListPackageRequest& other1822) { + catName = other1822.catName; + dbName = other1822.dbName; + __isset = other1822.__isset; } -ListPackageRequest& ListPackageRequest::operator=(const ListPackageRequest& other1817) { - catName = other1817.catName; - dbName = other1817.dbName; - __isset = other1817.__isset; +ListPackageRequest& ListPackageRequest::operator=(const ListPackageRequest& other1823) { + catName = other1823.catName; + dbName = other1823.dbName; + __isset = other1823.__isset; return *this; } void ListPackageRequest::printTo(std::ostream& out) const { @@ -52341,23 +52380,23 @@ void swap(Package &a, Package &b) { swap(a.__isset, b.__isset); 
} -Package::Package(const Package& other1818) { - catName = other1818.catName; - dbName = other1818.dbName; - packageName = other1818.packageName; - ownerName = other1818.ownerName; - header = other1818.header; - body = other1818.body; - __isset = other1818.__isset; +Package::Package(const Package& other1824) { + catName = other1824.catName; + dbName = other1824.dbName; + packageName = other1824.packageName; + ownerName = other1824.ownerName; + header = other1824.header; + body = other1824.body; + __isset = other1824.__isset; } -Package& Package::operator=(const Package& other1819) { - catName = other1819.catName; - dbName = other1819.dbName; - packageName = other1819.packageName; - ownerName = other1819.ownerName; - header = other1819.header; - body = other1819.body; - __isset = other1819.__isset; +Package& Package::operator=(const Package& other1825) { + catName = other1825.catName; + dbName = other1825.dbName; + packageName = other1825.packageName; + ownerName = other1825.ownerName; + header = other1825.header; + body = other1825.body; + __isset = other1825.__isset; return *this; } void Package::printTo(std::ostream& out) const { @@ -52489,17 +52528,17 @@ void swap(GetAllWriteEventInfoRequest &a, GetAllWriteEventInfoRequest &b) { swap(a.__isset, b.__isset); } -GetAllWriteEventInfoRequest::GetAllWriteEventInfoRequest(const GetAllWriteEventInfoRequest& other1820) { - txnId = other1820.txnId; - dbName = other1820.dbName; - tableName = other1820.tableName; - __isset = other1820.__isset; +GetAllWriteEventInfoRequest::GetAllWriteEventInfoRequest(const GetAllWriteEventInfoRequest& other1826) { + txnId = other1826.txnId; + dbName = other1826.dbName; + tableName = other1826.tableName; + __isset = other1826.__isset; } -GetAllWriteEventInfoRequest& GetAllWriteEventInfoRequest::operator=(const GetAllWriteEventInfoRequest& other1821) { - txnId = other1821.txnId; - dbName = other1821.dbName; - tableName = other1821.tableName; - __isset = other1821.__isset; +GetAllWriteEventInfoRequest& GetAllWriteEventInfoRequest::operator=(const GetAllWriteEventInfoRequest& other1827) { + txnId = other1827.txnId; + dbName = other1827.dbName; + tableName = other1827.tableName; + __isset = other1827.__isset; return *this; } void GetAllWriteEventInfoRequest::printTo(std::ostream& out) const { @@ -52587,13 +52626,13 @@ void swap(MetaException &a, MetaException &b) { swap(a.__isset, b.__isset); } -MetaException::MetaException(const MetaException& other1822) : TException() { - message = other1822.message; - __isset = other1822.__isset; +MetaException::MetaException(const MetaException& other1828) : TException() { + message = other1828.message; + __isset = other1828.__isset; } -MetaException& MetaException::operator=(const MetaException& other1823) { - message = other1823.message; - __isset = other1823.__isset; +MetaException& MetaException::operator=(const MetaException& other1829) { + message = other1829.message; + __isset = other1829.__isset; return *this; } void MetaException::printTo(std::ostream& out) const { @@ -52690,13 +52729,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) { swap(a.__isset, b.__isset); } -UnknownTableException::UnknownTableException(const UnknownTableException& other1824) : TException() { - message = other1824.message; - __isset = other1824.__isset; +UnknownTableException::UnknownTableException(const UnknownTableException& other1830) : TException() { + message = other1830.message; + __isset = other1830.__isset; } -UnknownTableException& UnknownTableException::operator=(const 
UnknownTableException& other1825) { - message = other1825.message; - __isset = other1825.__isset; +UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other1831) { + message = other1831.message; + __isset = other1831.__isset; return *this; } void UnknownTableException::printTo(std::ostream& out) const { @@ -52793,13 +52832,13 @@ void swap(UnknownDBException &a, UnknownDBException &b) { swap(a.__isset, b.__isset); } -UnknownDBException::UnknownDBException(const UnknownDBException& other1826) : TException() { - message = other1826.message; - __isset = other1826.__isset; +UnknownDBException::UnknownDBException(const UnknownDBException& other1832) : TException() { + message = other1832.message; + __isset = other1832.__isset; } -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other1827) { - message = other1827.message; - __isset = other1827.__isset; +UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other1833) { + message = other1833.message; + __isset = other1833.__isset; return *this; } void UnknownDBException::printTo(std::ostream& out) const { @@ -52896,13 +52935,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) { swap(a.__isset, b.__isset); } -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other1828) : TException() { - message = other1828.message; - __isset = other1828.__isset; +AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other1834) : TException() { + message = other1834.message; + __isset = other1834.__isset; } -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other1829) { - message = other1829.message; - __isset = other1829.__isset; +AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other1835) { + message = other1835.message; + __isset = other1835.__isset; return *this; } void AlreadyExistsException::printTo(std::ostream& out) const { @@ -52999,13 +53038,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) { swap(a.__isset, b.__isset); } -InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other1830) : TException() { - message = other1830.message; - __isset = other1830.__isset; +InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other1836) : TException() { + message = other1836.message; + __isset = other1836.__isset; } -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other1831) { - message = other1831.message; - __isset = other1831.__isset; +InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other1837) { + message = other1837.message; + __isset = other1837.__isset; return *this; } void InvalidPartitionException::printTo(std::ostream& out) const { @@ -53102,13 +53141,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) { swap(a.__isset, b.__isset); } -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other1832) : TException() { - message = other1832.message; - __isset = other1832.__isset; +UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other1838) : TException() { + message = other1838.message; + __isset = other1838.__isset; } -UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other1833) { - message = other1833.message; - 
__isset = other1833.__isset; +UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other1839) { + message = other1839.message; + __isset = other1839.__isset; return *this; } void UnknownPartitionException::printTo(std::ostream& out) const { @@ -53205,13 +53244,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) { swap(a.__isset, b.__isset); } -InvalidObjectException::InvalidObjectException(const InvalidObjectException& other1834) : TException() { - message = other1834.message; - __isset = other1834.__isset; +InvalidObjectException::InvalidObjectException(const InvalidObjectException& other1840) : TException() { + message = other1840.message; + __isset = other1840.__isset; } -InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other1835) { - message = other1835.message; - __isset = other1835.__isset; +InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other1841) { + message = other1841.message; + __isset = other1841.__isset; return *this; } void InvalidObjectException::printTo(std::ostream& out) const { @@ -53308,13 +53347,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) { swap(a.__isset, b.__isset); } -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other1836) : TException() { - message = other1836.message; - __isset = other1836.__isset; +NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other1842) : TException() { + message = other1842.message; + __isset = other1842.__isset; } -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other1837) { - message = other1837.message; - __isset = other1837.__isset; +NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other1843) { + message = other1843.message; + __isset = other1843.__isset; return *this; } void NoSuchObjectException::printTo(std::ostream& out) const { @@ -53411,13 +53450,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) { swap(a.__isset, b.__isset); } -InvalidOperationException::InvalidOperationException(const InvalidOperationException& other1838) : TException() { - message = other1838.message; - __isset = other1838.__isset; +InvalidOperationException::InvalidOperationException(const InvalidOperationException& other1844) : TException() { + message = other1844.message; + __isset = other1844.__isset; } -InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other1839) { - message = other1839.message; - __isset = other1839.__isset; +InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other1845) { + message = other1845.message; + __isset = other1845.__isset; return *this; } void InvalidOperationException::printTo(std::ostream& out) const { @@ -53514,13 +53553,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) { swap(a.__isset, b.__isset); } -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1840) : TException() { - message = other1840.message; - __isset = other1840.__isset; +ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other1846) : TException() { + message = other1846.message; + __isset = other1846.__isset; } -ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other1841) { - message = 
other1841.message; - __isset = other1841.__isset; +ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other1847) { + message = other1847.message; + __isset = other1847.__isset; return *this; } void ConfigValSecurityException::printTo(std::ostream& out) const { @@ -53617,13 +53656,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) { swap(a.__isset, b.__isset); } -InvalidInputException::InvalidInputException(const InvalidInputException& other1842) : TException() { - message = other1842.message; - __isset = other1842.__isset; +InvalidInputException::InvalidInputException(const InvalidInputException& other1848) : TException() { + message = other1848.message; + __isset = other1848.__isset; } -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1843) { - message = other1843.message; - __isset = other1843.__isset; +InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other1849) { + message = other1849.message; + __isset = other1849.__isset; return *this; } void InvalidInputException::printTo(std::ostream& out) const { @@ -53720,13 +53759,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) { swap(a.__isset, b.__isset); } -NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1844) : TException() { - message = other1844.message; - __isset = other1844.__isset; +NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other1850) : TException() { + message = other1850.message; + __isset = other1850.__isset; } -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1845) { - message = other1845.message; - __isset = other1845.__isset; +NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other1851) { + message = other1851.message; + __isset = other1851.__isset; return *this; } void NoSuchTxnException::printTo(std::ostream& out) const { @@ -53823,13 +53862,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) { swap(a.__isset, b.__isset); } -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1846) : TException() { - message = other1846.message; - __isset = other1846.__isset; +TxnAbortedException::TxnAbortedException(const TxnAbortedException& other1852) : TException() { + message = other1852.message; + __isset = other1852.__isset; } -TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1847) { - message = other1847.message; - __isset = other1847.__isset; +TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other1853) { + message = other1853.message; + __isset = other1853.__isset; return *this; } void TxnAbortedException::printTo(std::ostream& out) const { @@ -53926,13 +53965,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) { swap(a.__isset, b.__isset); } -TxnOpenException::TxnOpenException(const TxnOpenException& other1848) : TException() { - message = other1848.message; - __isset = other1848.__isset; +TxnOpenException::TxnOpenException(const TxnOpenException& other1854) : TException() { + message = other1854.message; + __isset = other1854.__isset; } -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1849) { - message = other1849.message; - __isset = other1849.__isset; +TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other1855) { + message = other1855.message; + __isset = other1855.__isset; return *this; } void 
TxnOpenException::printTo(std::ostream& out) const { @@ -54029,13 +54068,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) { swap(a.__isset, b.__isset); } -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1850) : TException() { - message = other1850.message; - __isset = other1850.__isset; +NoSuchLockException::NoSuchLockException(const NoSuchLockException& other1856) : TException() { + message = other1856.message; + __isset = other1856.__isset; } -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1851) { - message = other1851.message; - __isset = other1851.__isset; +NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other1857) { + message = other1857.message; + __isset = other1857.__isset; return *this; } void NoSuchLockException::printTo(std::ostream& out) const { @@ -54132,13 +54171,13 @@ void swap(CompactionAbortedException &a, CompactionAbortedException &b) { swap(a.__isset, b.__isset); } -CompactionAbortedException::CompactionAbortedException(const CompactionAbortedException& other1852) : TException() { - message = other1852.message; - __isset = other1852.__isset; +CompactionAbortedException::CompactionAbortedException(const CompactionAbortedException& other1858) : TException() { + message = other1858.message; + __isset = other1858.__isset; } -CompactionAbortedException& CompactionAbortedException::operator=(const CompactionAbortedException& other1853) { - message = other1853.message; - __isset = other1853.__isset; +CompactionAbortedException& CompactionAbortedException::operator=(const CompactionAbortedException& other1859) { + message = other1859.message; + __isset = other1859.__isset; return *this; } void CompactionAbortedException::printTo(std::ostream& out) const { @@ -54235,13 +54274,13 @@ void swap(NoSuchCompactionException &a, NoSuchCompactionException &b) { swap(a.__isset, b.__isset); } -NoSuchCompactionException::NoSuchCompactionException(const NoSuchCompactionException& other1854) : TException() { - message = other1854.message; - __isset = other1854.__isset; +NoSuchCompactionException::NoSuchCompactionException(const NoSuchCompactionException& other1860) : TException() { + message = other1860.message; + __isset = other1860.__isset; } -NoSuchCompactionException& NoSuchCompactionException::operator=(const NoSuchCompactionException& other1855) { - message = other1855.message; - __isset = other1855.__isset; +NoSuchCompactionException& NoSuchCompactionException::operator=(const NoSuchCompactionException& other1861) { + message = other1861.message; + __isset = other1861.__isset; return *this; } void NoSuchCompactionException::printTo(std::ostream& out) const { diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h index 47d79d6ae08d..2a65c40f531a 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -5012,7 +5012,7 @@ void swap(ColumnStatisticsDesc &a, ColumnStatisticsDesc &b); std::ostream& operator<<(std::ostream& out, const ColumnStatisticsDesc& obj); typedef struct _ColumnStatistics__isset { - _ColumnStatistics__isset() : isStatsCompliant(false), engine(false) {} + _ColumnStatistics__isset() : isStatsCompliant(false), engine(true) {} bool isStatsCompliant :1; bool engine :1; } _ColumnStatistics__isset; @@ -5022,9 
+5022,8 @@ class ColumnStatistics : public virtual ::apache::thrift::TBase { ColumnStatistics(const ColumnStatistics&); ColumnStatistics& operator=(const ColumnStatistics&); - ColumnStatistics() noexcept - : isStatsCompliant(0), - engine() { + ColumnStatistics() : isStatsCompliant(0), + engine("hive") { } virtual ~ColumnStatistics() noexcept; @@ -5938,10 +5937,11 @@ void swap(AggrStats &a, AggrStats &b); std::ostream& operator<<(std::ostream& out, const AggrStats& obj); typedef struct _SetPartitionsStatsRequest__isset { - _SetPartitionsStatsRequest__isset() : needMerge(false), writeId(true), validWriteIdList(false) {} + _SetPartitionsStatsRequest__isset() : needMerge(false), writeId(true), validWriteIdList(false), engine(true) {} bool needMerge :1; bool writeId :1; bool validWriteIdList :1; + bool engine :1; } _SetPartitionsStatsRequest__isset; class SetPartitionsStatsRequest : public virtual ::apache::thrift::TBase { @@ -5949,11 +5949,10 @@ class SetPartitionsStatsRequest : public virtual ::apache::thrift::TBase { SetPartitionsStatsRequest(const SetPartitionsStatsRequest&); SetPartitionsStatsRequest& operator=(const SetPartitionsStatsRequest&); - SetPartitionsStatsRequest() noexcept - : needMerge(0), - writeId(-1LL), - validWriteIdList(), - engine() { + SetPartitionsStatsRequest() : needMerge(0), + writeId(-1LL), + validWriteIdList(), + engine("hive") { } virtual ~SetPartitionsStatsRequest() noexcept; @@ -5991,7 +5990,9 @@ class SetPartitionsStatsRequest : public virtual ::apache::thrift::TBase { return false; else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) return false; - if (!(engine == rhs.engine)) + if (__isset.engine != rhs.__isset.engine) + return false; + else if (__isset.engine && !(engine == rhs.engine)) return false; return true; } @@ -7473,9 +7474,10 @@ void swap(PartitionsStatsResult &a, PartitionsStatsResult &b); std::ostream& operator<<(std::ostream& out, const PartitionsStatsResult& obj); typedef struct _TableStatsRequest__isset { - _TableStatsRequest__isset() : catName(false), validWriteIdList(false), id(true) {} + _TableStatsRequest__isset() : catName(false), validWriteIdList(false), engine(true), id(true) {} bool catName :1; bool validWriteIdList :1; + bool engine :1; bool id :1; } _TableStatsRequest__isset; @@ -7484,13 +7486,12 @@ class TableStatsRequest : public virtual ::apache::thrift::TBase { TableStatsRequest(const TableStatsRequest&); TableStatsRequest& operator=(const TableStatsRequest&); - TableStatsRequest() noexcept - : dbName(), - tblName(), - catName(), - validWriteIdList(), - engine(), - id(-1LL) { + TableStatsRequest() : dbName(), + tblName(), + catName(), + validWriteIdList(), + engine("hive"), + id(-1LL) { } virtual ~TableStatsRequest() noexcept; @@ -7534,7 +7535,9 @@ class TableStatsRequest : public virtual ::apache::thrift::TBase { return false; else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) return false; - if (!(engine == rhs.engine)) + if (__isset.engine != rhs.__isset.engine) + return false; + else if (__isset.engine && !(engine == rhs.engine)) return false; if (__isset.id != rhs.__isset.id) return false; @@ -7559,9 +7562,10 @@ void swap(TableStatsRequest &a, TableStatsRequest &b); std::ostream& operator<<(std::ostream& out, const TableStatsRequest& obj); typedef struct _PartitionsStatsRequest__isset { - _PartitionsStatsRequest__isset() : catName(false), validWriteIdList(false) {} + _PartitionsStatsRequest__isset() : catName(false), validWriteIdList(false), engine(true) {} bool 
catName :1; bool validWriteIdList :1; + bool engine :1; } _PartitionsStatsRequest__isset; class PartitionsStatsRequest : public virtual ::apache::thrift::TBase { @@ -7569,12 +7573,11 @@ class PartitionsStatsRequest : public virtual ::apache::thrift::TBase { PartitionsStatsRequest(const PartitionsStatsRequest&); PartitionsStatsRequest& operator=(const PartitionsStatsRequest&); - PartitionsStatsRequest() noexcept - : dbName(), - tblName(), - catName(), - validWriteIdList(), - engine() { + PartitionsStatsRequest() : dbName(), + tblName(), + catName(), + validWriteIdList(), + engine("hive") { } virtual ~PartitionsStatsRequest() noexcept; @@ -7620,7 +7623,9 @@ class PartitionsStatsRequest : public virtual ::apache::thrift::TBase { return false; else if (__isset.validWriteIdList && !(validWriteIdList == rhs.validWriteIdList)) return false; - if (!(engine == rhs.engine)) + if (__isset.engine != rhs.__isset.engine) + return false; + else if (__isset.engine && !(engine == rhs.engine)) return false; return true; } @@ -8254,7 +8259,7 @@ void swap(PartitionValuesResponse &a, PartitionValuesResponse &b); std::ostream& operator<<(std::ostream& out, const PartitionValuesResponse& obj); typedef struct _GetPartitionsByNamesRequest__isset { - _GetPartitionsByNamesRequest__isset() : names(false), get_col_stats(false), processorCapabilities(false), processorIdentifier(false), engine(false), validWriteIdList(false), getFileMetadata(false), id(true), skipColumnSchemaForPartition(false), includeParamKeyPattern(false), excludeParamKeyPattern(false) {} + _GetPartitionsByNamesRequest__isset() : names(false), get_col_stats(false), processorCapabilities(false), processorIdentifier(false), engine(true), validWriteIdList(false), getFileMetadata(false), id(true), skipColumnSchemaForPartition(false), includeParamKeyPattern(false), excludeParamKeyPattern(false) {} bool names :1; bool get_col_stats :1; bool processorCapabilities :1; @@ -8273,18 +8278,17 @@ class GetPartitionsByNamesRequest : public virtual ::apache::thrift::TBase { GetPartitionsByNamesRequest(const GetPartitionsByNamesRequest&); GetPartitionsByNamesRequest& operator=(const GetPartitionsByNamesRequest&); - GetPartitionsByNamesRequest() noexcept - : db_name(), - tbl_name(), - get_col_stats(0), - processorIdentifier(), - engine(), - validWriteIdList(), - getFileMetadata(0), - id(-1LL), - skipColumnSchemaForPartition(0), - includeParamKeyPattern(), - excludeParamKeyPattern() { + GetPartitionsByNamesRequest() : db_name(), + tbl_name(), + get_col_stats(0), + processorIdentifier(), + engine("hive"), + validWriteIdList(), + getFileMetadata(0), + id(-1LL), + skipColumnSchemaForPartition(0), + includeParamKeyPattern(), + excludeParamKeyPattern() { } virtual ~GetPartitionsByNamesRequest() noexcept; @@ -13888,7 +13892,7 @@ void swap(GetProjectionsSpec &a, GetProjectionsSpec &b); std::ostream& operator<<(std::ostream& out, const GetProjectionsSpec& obj); typedef struct _GetTableRequest__isset { - _GetTableRequest__isset() : capabilities(false), catName(false), validWriteIdList(false), getColumnStats(false), processorCapabilities(false), processorIdentifier(false), engine(false), id(true) {} + _GetTableRequest__isset() : capabilities(false), catName(false), validWriteIdList(false), getColumnStats(false), processorCapabilities(false), processorIdentifier(false), engine(true), id(true) {} bool capabilities :1; bool catName :1; bool validWriteIdList :1; @@ -13904,15 +13908,14 @@ class GetTableRequest : public virtual ::apache::thrift::TBase { GetTableRequest(const 
GetTableRequest&); GetTableRequest& operator=(const GetTableRequest&); - GetTableRequest() noexcept - : dbName(), - tblName(), - catName(), - validWriteIdList(), - getColumnStats(0), - processorIdentifier(), - engine(), - id(-1LL) { + GetTableRequest() : dbName(), + tblName(), + catName(), + validWriteIdList(), + getColumnStats(0), + processorIdentifier(), + engine("hive"), + id(-1LL) { } virtual ~GetTableRequest() noexcept; @@ -19720,7 +19723,7 @@ void swap(GetPartitionNamesPsResponse &a, GetPartitionNamesPsResponse &b); std::ostream& operator<<(std::ostream& out, const GetPartitionNamesPsResponse& obj); typedef struct _GetPartitionsPsWithAuthRequest__isset { - _GetPartitionsPsWithAuthRequest__isset() : catName(false), partVals(false), maxParts(true), userName(false), groupNames(false), validWriteIdList(false), id(true), skipColumnSchemaForPartition(false), includeParamKeyPattern(false), excludeParamKeyPattern(false) {} + _GetPartitionsPsWithAuthRequest__isset() : catName(false), partVals(false), maxParts(true), userName(false), groupNames(false), validWriteIdList(false), id(true), skipColumnSchemaForPartition(false), includeParamKeyPattern(false), excludeParamKeyPattern(false), partNames(false) {} bool catName :1; bool partVals :1; bool maxParts :1; @@ -19731,6 +19734,7 @@ typedef struct _GetPartitionsPsWithAuthRequest__isset { bool skipColumnSchemaForPartition :1; bool includeParamKeyPattern :1; bool excludeParamKeyPattern :1; + bool partNames :1; } _GetPartitionsPsWithAuthRequest__isset; class GetPartitionsPsWithAuthRequest : public virtual ::apache::thrift::TBase { @@ -19764,6 +19768,7 @@ class GetPartitionsPsWithAuthRequest : public virtual ::apache::thrift::TBase { bool skipColumnSchemaForPartition; std::string includeParamKeyPattern; std::string excludeParamKeyPattern; + std::vector partNames; _GetPartitionsPsWithAuthRequest__isset __isset; @@ -19791,6 +19796,8 @@ class GetPartitionsPsWithAuthRequest : public virtual ::apache::thrift::TBase { void __set_excludeParamKeyPattern(const std::string& val); + void __set_partNames(const std::vector & val); + bool operator == (const GetPartitionsPsWithAuthRequest & rhs) const { if (__isset.catName != rhs.__isset.catName) @@ -19837,6 +19844,10 @@ class GetPartitionsPsWithAuthRequest : public virtual ::apache::thrift::TBase { return false; else if (__isset.excludeParamKeyPattern && !(excludeParamKeyPattern == rhs.excludeParamKeyPattern)) return false; + if (__isset.partNames != rhs.__isset.partNames) + return false; + else if (__isset.partNames && !(partNames == rhs.partNames)) + return false; return true; } bool operator != (const GetPartitionsPsWithAuthRequest &rhs) const { diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java index 782586acece1..3fffbce5d105 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java @@ -114,6 +114,8 @@ public java.lang.String getFieldName() { } public ColumnStatistics() { + this.engine = "hive"; + } public ColumnStatistics( @@ -156,7 +158,8 @@ public void clear() { this.statsObj = null; setIsStatsCompliantIsSet(false); this.isStatsCompliant = false; - this.engine = null; + this.engine = 
"hive"; + } @org.apache.thrift.annotation.Nullable diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsRequest.java index 9e3e03c5f308..9a77c9b3a30a 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsRequest.java @@ -321,15 +321,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenTxnsRequest case 1: // EXCLUDE_TXN_TYPES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1520 = iprot.readListBegin(); - struct.excludeTxnTypes = new java.util.ArrayList(_list1520.size); - @org.apache.thrift.annotation.Nullable TxnType _elem1521; - for (int _i1522 = 0; _i1522 < _list1520.size; ++_i1522) + org.apache.thrift.protocol.TList _list1528 = iprot.readListBegin(); + struct.excludeTxnTypes = new java.util.ArrayList(_list1528.size); + @org.apache.thrift.annotation.Nullable TxnType _elem1529; + for (int _i1530 = 0; _i1530 < _list1528.size; ++_i1530) { - _elem1521 = org.apache.hadoop.hive.metastore.api.TxnType.findByValue(iprot.readI32()); - if (_elem1521 != null) + _elem1529 = org.apache.hadoop.hive.metastore.api.TxnType.findByValue(iprot.readI32()); + if (_elem1529 != null) { - struct.excludeTxnTypes.add(_elem1521); + struct.excludeTxnTypes.add(_elem1529); } } iprot.readListEnd(); @@ -357,9 +357,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenTxnsRequest oprot.writeFieldBegin(EXCLUDE_TXN_TYPES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.excludeTxnTypes.size())); - for (TxnType _iter1523 : struct.excludeTxnTypes) + for (TxnType _iter1531 : struct.excludeTxnTypes) { - oprot.writeI32(_iter1523.getValue()); + oprot.writeI32(_iter1531.getValue()); } oprot.writeListEnd(); } @@ -391,9 +391,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsRequest if (struct.isSetExcludeTxnTypes()) { { oprot.writeI32(struct.excludeTxnTypes.size()); - for (TxnType _iter1524 : struct.excludeTxnTypes) + for (TxnType _iter1532 : struct.excludeTxnTypes) { - oprot.writeI32(_iter1524.getValue()); + oprot.writeI32(_iter1532.getValue()); } } } @@ -405,15 +405,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsRequest s java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1525 = iprot.readListBegin(org.apache.thrift.protocol.TType.I32); - struct.excludeTxnTypes = new java.util.ArrayList(_list1525.size); - @org.apache.thrift.annotation.Nullable TxnType _elem1526; - for (int _i1527 = 0; _i1527 < _list1525.size; ++_i1527) + org.apache.thrift.protocol.TList _list1533 = iprot.readListBegin(org.apache.thrift.protocol.TType.I32); + struct.excludeTxnTypes = new java.util.ArrayList(_list1533.size); + @org.apache.thrift.annotation.Nullable TxnType _elem1534; + for (int _i1535 = 0; _i1535 < _list1533.size; ++_i1535) { - _elem1526 = org.apache.hadoop.hive.metastore.api.TxnType.findByValue(iprot.readI32()); - if (_elem1526 != null) + _elem1534 = org.apache.hadoop.hive.metastore.api.TxnType.findByValue(iprot.readI32()); + if (_elem1534 != null) { - 
struct.excludeTxnTypes.add(_elem1526); + struct.excludeTxnTypes.add(_elem1534); } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java index 2ecd9aa0c1a5..2654f09d4f4e 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsByNamesRequest.java @@ -181,6 +181,8 @@ public java.lang.String getFieldName() { } public GetPartitionsByNamesRequest() { + this.engine = "hive"; + this.id = -1L; } @@ -247,7 +249,8 @@ public void clear() { this.get_col_stats = false; this.processorCapabilities = null; this.processorIdentifier = null; - this.engine = null; + this.engine = "hive"; + this.validWriteIdList = null; setGetFileMetadataIsSet(false); this.getFileMetadata = false; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsPsWithAuthRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsPsWithAuthRequest.java index 8cd28971b2ae..d490a6810f0a 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsPsWithAuthRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsPsWithAuthRequest.java @@ -23,6 +23,7 @@ private static final org.apache.thrift.protocol.TField SKIP_COLUMN_SCHEMA_FOR_PARTITION_FIELD_DESC = new org.apache.thrift.protocol.TField("skipColumnSchemaForPartition", org.apache.thrift.protocol.TType.BOOL, (short)10); private static final org.apache.thrift.protocol.TField INCLUDE_PARAM_KEY_PATTERN_FIELD_DESC = new org.apache.thrift.protocol.TField("includeParamKeyPattern", org.apache.thrift.protocol.TType.STRING, (short)11); private static final org.apache.thrift.protocol.TField EXCLUDE_PARAM_KEY_PATTERN_FIELD_DESC = new org.apache.thrift.protocol.TField("excludeParamKeyPattern", org.apache.thrift.protocol.TType.STRING, (short)12); + private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)13); private static final org.apache.thrift.scheme.SchemeFactory STANDARD_SCHEME_FACTORY = new GetPartitionsPsWithAuthRequestStandardSchemeFactory(); private static final org.apache.thrift.scheme.SchemeFactory TUPLE_SCHEME_FACTORY = new GetPartitionsPsWithAuthRequestTupleSchemeFactory(); @@ -39,6 +40,7 @@ private boolean skipColumnSchemaForPartition; // optional private @org.apache.thrift.annotation.Nullable java.lang.String includeParamKeyPattern; // optional private @org.apache.thrift.annotation.Nullable java.lang.String excludeParamKeyPattern; // optional + private @org.apache.thrift.annotation.Nullable java.util.List partNames; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -53,7 +55,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum { ID((short)9, "id"), SKIP_COLUMN_SCHEMA_FOR_PARTITION((short)10, "skipColumnSchemaForPartition"), INCLUDE_PARAM_KEY_PATTERN((short)11, "includeParamKeyPattern"), - EXCLUDE_PARAM_KEY_PATTERN((short)12, "excludeParamKeyPattern"); + EXCLUDE_PARAM_KEY_PATTERN((short)12, "excludeParamKeyPattern"), + PART_NAMES((short)13, "partNames"); private static final java.util.Map byName = new java.util.HashMap(); @@ -93,6 +96,8 @@ public static _Fields findByThriftId(int fieldId) { return INCLUDE_PARAM_KEY_PATTERN; case 12: // EXCLUDE_PARAM_KEY_PATTERN return EXCLUDE_PARAM_KEY_PATTERN; + case 13: // PART_NAMES + return PART_NAMES; default: return null; } @@ -138,7 +143,7 @@ public java.lang.String getFieldName() { private static final int __ID_ISSET_ID = 1; private static final int __SKIPCOLUMNSCHEMAFORPARTITION_ISSET_ID = 2; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.PART_VALS,_Fields.MAX_PARTS,_Fields.USER_NAME,_Fields.GROUP_NAMES,_Fields.VALID_WRITE_ID_LIST,_Fields.ID,_Fields.SKIP_COLUMN_SCHEMA_FOR_PARTITION,_Fields.INCLUDE_PARAM_KEY_PATTERN,_Fields.EXCLUDE_PARAM_KEY_PATTERN}; + private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.PART_VALS,_Fields.MAX_PARTS,_Fields.USER_NAME,_Fields.GROUP_NAMES,_Fields.VALID_WRITE_ID_LIST,_Fields.ID,_Fields.SKIP_COLUMN_SCHEMA_FOR_PARTITION,_Fields.INCLUDE_PARAM_KEY_PATTERN,_Fields.EXCLUDE_PARAM_KEY_PATTERN,_Fields.PART_NAMES}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -168,6 +173,9 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.EXCLUDE_PARAM_KEY_PATTERN, new org.apache.thrift.meta_data.FieldMetaData("excludeParamKeyPattern", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PART_NAMES, new org.apache.thrift.meta_data.FieldMetaData("partNames", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetPartitionsPsWithAuthRequest.class, metaDataMap); } @@ -225,6 +233,10 @@ public GetPartitionsPsWithAuthRequest(GetPartitionsPsWithAuthRequest other) { if (other.isSetExcludeParamKeyPattern()) { this.excludeParamKeyPattern = other.excludeParamKeyPattern; } + if (other.isSetPartNames()) { + java.util.List __this__partNames = new java.util.ArrayList(other.partNames); + this.partNames = __this__partNames; + } } public GetPartitionsPsWithAuthRequest deepCopy() { @@ -248,6 +260,7 @@ public void clear() { this.skipColumnSchemaForPartition = false; this.includeParamKeyPattern = null; this.excludeParamKeyPattern = null; + this.partNames = null; } @org.apache.thrift.annotation.Nullable @@ -564,6 +577,46 @@ public void setExcludeParamKeyPatternIsSet(boolean value) { } } + public int getPartNamesSize() { + return 
(this.partNames == null) ? 0 : this.partNames.size(); + } + + @org.apache.thrift.annotation.Nullable + public java.util.Iterator getPartNamesIterator() { + return (this.partNames == null) ? null : this.partNames.iterator(); + } + + public void addToPartNames(java.lang.String elem) { + if (this.partNames == null) { + this.partNames = new java.util.ArrayList(); + } + this.partNames.add(elem); + } + + @org.apache.thrift.annotation.Nullable + public java.util.List getPartNames() { + return this.partNames; + } + + public void setPartNames(@org.apache.thrift.annotation.Nullable java.util.List partNames) { + this.partNames = partNames; + } + + public void unsetPartNames() { + this.partNames = null; + } + + /** Returns true if field partNames is set (has been assigned a value) and false otherwise */ + public boolean isSetPartNames() { + return this.partNames != null; + } + + public void setPartNamesIsSet(boolean value) { + if (!value) { + this.partNames = null; + } + } + public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable java.lang.Object value) { switch (field) { case CAT_NAME: @@ -662,6 +715,14 @@ public void setFieldValue(_Fields field, @org.apache.thrift.annotation.Nullable } break; + case PART_NAMES: + if (value == null) { + unsetPartNames(); + } else { + setPartNames((java.util.List)value); + } + break; + } } @@ -704,6 +765,9 @@ public java.lang.Object getFieldValue(_Fields field) { case EXCLUDE_PARAM_KEY_PATTERN: return getExcludeParamKeyPattern(); + case PART_NAMES: + return getPartNames(); + } throw new java.lang.IllegalStateException(); } @@ -739,6 +803,8 @@ public boolean isSet(_Fields field) { return isSetIncludeParamKeyPattern(); case EXCLUDE_PARAM_KEY_PATTERN: return isSetExcludeParamKeyPattern(); + case PART_NAMES: + return isSetPartNames(); } throw new java.lang.IllegalStateException(); } @@ -864,6 +930,15 @@ public boolean equals(GetPartitionsPsWithAuthRequest that) { return false; } + boolean this_present_partNames = true && this.isSetPartNames(); + boolean that_present_partNames = true && that.isSetPartNames(); + if (this_present_partNames || that_present_partNames) { + if (!(this_present_partNames && that_present_partNames)) + return false; + if (!this.partNames.equals(that.partNames)) + return false; + } + return true; } @@ -919,6 +994,10 @@ public int hashCode() { if (isSetExcludeParamKeyPattern()) hashCode = hashCode * 8191 + excludeParamKeyPattern.hashCode(); + hashCode = hashCode * 8191 + ((isSetPartNames()) ? 
131071 : 524287); + if (isSetPartNames()) + hashCode = hashCode * 8191 + partNames.hashCode(); + return hashCode; } @@ -1050,6 +1129,16 @@ public int compareTo(GetPartitionsPsWithAuthRequest other) { return lastComparison; } } + lastComparison = java.lang.Boolean.compare(isSetPartNames(), other.isSetPartNames()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPartNames()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partNames, other.partNames); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1174,6 +1263,16 @@ public java.lang.String toString() { } first = false; } + if (isSetPartNames()) { + if (!first) sb.append(", "); + sb.append("partNames:"); + if (this.partNames == null) { + sb.append("null"); + } else { + sb.append(this.partNames); + } + first = false; + } sb.append(")"); return sb.toString(); } @@ -1343,6 +1442,24 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsPsWith org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 13: // PART_NAMES + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1494 = iprot.readListBegin(); + struct.partNames = new java.util.ArrayList(_list1494.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1495; + for (int _i1496 = 0; _i1496 < _list1494.size; ++_i1496) + { + _elem1495 = iprot.readString(); + struct.partNames.add(_elem1495); + } + iprot.readListEnd(); + } + struct.setPartNamesIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1378,9 +1495,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsPsWit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partVals.size())); - for (java.lang.String _iter1494 : struct.partVals) + for (java.lang.String _iter1497 : struct.partVals) { - oprot.writeString(_iter1494); + oprot.writeString(_iter1497); } oprot.writeListEnd(); } @@ -1404,9 +1521,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsPsWit oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.groupNames.size())); - for (java.lang.String _iter1495 : struct.groupNames) + for (java.lang.String _iter1498 : struct.groupNames) { - oprot.writeString(_iter1495); + oprot.writeString(_iter1498); } oprot.writeListEnd(); } @@ -1444,6 +1561,20 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsPsWit oprot.writeFieldEnd(); } } + if (struct.partNames != null) { + if (struct.isSetPartNames()) { + oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); + for (java.lang.String _iter1499 : struct.partNames) + { + oprot.writeString(_iter1499); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1494,16 +1625,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsPsWith if (struct.isSetExcludeParamKeyPattern()) { optionals.set(9); } - oprot.writeBitSet(optionals, 10); + if (struct.isSetPartNames()) { + optionals.set(10); + } + 
oprot.writeBitSet(optionals, 11); if (struct.isSetCatName()) { oprot.writeString(struct.catName); } if (struct.isSetPartVals()) { { oprot.writeI32(struct.partVals.size()); - for (java.lang.String _iter1496 : struct.partVals) + for (java.lang.String _iter1500 : struct.partVals) { - oprot.writeString(_iter1496); + oprot.writeString(_iter1500); } } } @@ -1516,9 +1650,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsPsWith if (struct.isSetGroupNames()) { { oprot.writeI32(struct.groupNames.size()); - for (java.lang.String _iter1497 : struct.groupNames) + for (java.lang.String _iter1501 : struct.groupNames) { - oprot.writeString(_iter1497); + oprot.writeString(_iter1501); } } } @@ -1537,6 +1671,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsPsWith if (struct.isSetExcludeParamKeyPattern()) { oprot.writeString(struct.excludeParamKeyPattern); } + if (struct.isSetPartNames()) { + { + oprot.writeI32(struct.partNames.size()); + for (java.lang.String _iter1502 : struct.partNames) + { + oprot.writeString(_iter1502); + } + } + } } @Override @@ -1546,20 +1689,20 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsPsWithA struct.setDbNameIsSet(true); struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(10); + java.util.BitSet incoming = iprot.readBitSet(11); if (incoming.get(0)) { struct.catName = iprot.readString(); struct.setCatNameIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1498 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.partVals = new java.util.ArrayList(_list1498.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1499; - for (int _i1500 = 0; _i1500 < _list1498.size; ++_i1500) + org.apache.thrift.protocol.TList _list1503 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.partVals = new java.util.ArrayList(_list1503.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1504; + for (int _i1505 = 0; _i1505 < _list1503.size; ++_i1505) { - _elem1499 = iprot.readString(); - struct.partVals.add(_elem1499); + _elem1504 = iprot.readString(); + struct.partVals.add(_elem1504); } } struct.setPartValsIsSet(true); @@ -1574,13 +1717,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsPsWithA } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1501 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.groupNames = new java.util.ArrayList(_list1501.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1502; - for (int _i1503 = 0; _i1503 < _list1501.size; ++_i1503) + org.apache.thrift.protocol.TList _list1506 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.groupNames = new java.util.ArrayList(_list1506.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1507; + for (int _i1508 = 0; _i1508 < _list1506.size; ++_i1508) { - _elem1502 = iprot.readString(); - struct.groupNames.add(_elem1502); + _elem1507 = iprot.readString(); + struct.groupNames.add(_elem1507); } } struct.setGroupNamesIsSet(true); @@ -1605,6 +1748,19 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsPsWithA struct.excludeParamKeyPattern = iprot.readString(); struct.setExcludeParamKeyPatternIsSet(true); } + if (incoming.get(10)) { + { + org.apache.thrift.protocol.TList _list1509 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + 
struct.partNames = new java.util.ArrayList(_list1509.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1510; + for (int _i1511 = 0; _i1511 < _list1509.size; ++_i1511) + { + _elem1510 = iprot.readString(); + struct.partNames.add(_elem1510); + } + } + struct.setPartNamesIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsPsWithAuthResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsPsWithAuthResponse.java index fa839ad0470d..6984966a98d8 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsPsWithAuthResponse.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPartitionsPsWithAuthResponse.java @@ -329,14 +329,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPartitionsPsWith case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1504 = iprot.readListBegin(); - struct.partitions = new java.util.ArrayList(_list1504.size); - @org.apache.thrift.annotation.Nullable Partition _elem1505; - for (int _i1506 = 0; _i1506 < _list1504.size; ++_i1506) + org.apache.thrift.protocol.TList _list1512 = iprot.readListBegin(); + struct.partitions = new java.util.ArrayList(_list1512.size); + @org.apache.thrift.annotation.Nullable Partition _elem1513; + for (int _i1514 = 0; _i1514 < _list1512.size; ++_i1514) { - _elem1505 = new Partition(); - _elem1505.read(iprot); - struct.partitions.add(_elem1505); + _elem1513 = new Partition(); + _elem1513.read(iprot); + struct.partitions.add(_elem1513); } iprot.readListEnd(); } @@ -362,9 +362,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPartitionsPsWit oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter1507 : struct.partitions) + for (Partition _iter1515 : struct.partitions) { - _iter1507.write(oprot); + _iter1515.write(oprot); } oprot.writeListEnd(); } @@ -389,9 +389,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsPsWith org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; { oprot.writeI32(struct.partitions.size()); - for (Partition _iter1508 : struct.partitions) + for (Partition _iter1516 : struct.partitions) { - _iter1508.write(oprot); + _iter1516.write(oprot); } } } @@ -400,14 +400,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPartitionsPsWith public void read(org.apache.thrift.protocol.TProtocol prot, GetPartitionsPsWithAuthResponse struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list1509 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.partitions = new java.util.ArrayList(_list1509.size); - @org.apache.thrift.annotation.Nullable Partition _elem1510; - for (int _i1511 = 0; _i1511 < _list1509.size; ++_i1511) + org.apache.thrift.protocol.TList _list1517 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.partitions = new java.util.ArrayList(_list1517.size); + @org.apache.thrift.annotation.Nullable 
Partition _elem1518; + for (int _i1519 = 0; _i1519 < _list1517.size; ++_i1519) { - _elem1510 = new Partition(); - _elem1510.read(iprot); - struct.partitions.add(_elem1510); + _elem1518 = new Partition(); + _elem1518.read(iprot); + struct.partitions.add(_elem1518); } } struct.setPartitionsIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java index 50d04e78653f..a333674af92f 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java @@ -157,6 +157,8 @@ public java.lang.String getFieldName() { } public GetTableRequest() { + this.engine = "hive"; + this.id = -1L; } @@ -219,7 +221,8 @@ public void clear() { this.getColumnStats = false; this.processorCapabilities = null; this.processorIdentifier = null; - this.engine = null; + this.engine = "hive"; + this.id = -1L; } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java index bdddf8844bdd..7ef14ac7706e 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java @@ -28,7 +28,7 @@ private @org.apache.thrift.annotation.Nullable java.util.List partNames; // required private @org.apache.thrift.annotation.Nullable java.lang.String catName; // optional private @org.apache.thrift.annotation.Nullable java.lang.String validWriteIdList; // optional - private @org.apache.thrift.annotation.Nullable java.lang.String engine; // required + private @org.apache.thrift.annotation.Nullable java.lang.String engine; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -109,7 +109,7 @@ public java.lang.String getFieldName() { } // isset id assignments - private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST}; + private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST,_Fields.ENGINE}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -127,28 +127,28 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.ENGINE, new org.apache.thrift.meta_data.FieldMetaData("engine", org.apache.thrift.TFieldRequirementType.REQUIRED, + tmpMap.put(_Fields.ENGINE, new org.apache.thrift.meta_data.FieldMetaData("engine", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsRequest.class, metaDataMap); } public PartitionsStatsRequest() { + this.engine = "hive"; + } public PartitionsStatsRequest( java.lang.String dbName, java.lang.String tblName, java.util.List colNames, - java.util.List partNames, - java.lang.String engine) + java.util.List partNames) { this(); this.dbName = dbName; this.tblName = tblName; this.colNames = colNames; this.partNames = partNames; - this.engine = engine; } /** @@ -192,7 +192,8 @@ public void clear() { this.partNames = null; this.catName = null; this.validWriteIdList = null; - this.engine = null; + this.engine = "hive"; + } @org.apache.thrift.annotation.Nullable @@ -773,14 +774,16 @@ public java.lang.String toString() { } first = false; } - if (!first) sb.append(", "); - sb.append("engine:"); - if (this.engine == null) { - sb.append("null"); - } else { - sb.append(this.engine); + if (isSetEngine()) { + if (!first) sb.append(", "); + sb.append("engine:"); + if (this.engine == null) { + sb.append("null"); + } else { + sb.append(this.engine); + } + first = false; } - first = false; sb.append(")"); return sb.toString(); } @@ -803,10 +806,6 @@ public void validate() throws org.apache.thrift.TException { throw new org.apache.thrift.protocol.TProtocolException("Required field 'partNames' is unset! Struct:" + toString()); } - if (!isSetEngine()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'engine' is unset! 
Struct:" + toString()); - } - // check for sub-struct validity } @@ -982,9 +981,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsReq } } if (struct.engine != null) { - oprot.writeFieldBegin(ENGINE_FIELD_DESC); - oprot.writeString(struct.engine); - oprot.writeFieldEnd(); + if (struct.isSetEngine()) { + oprot.writeFieldBegin(ENGINE_FIELD_DESC); + oprot.writeString(struct.engine); + oprot.writeFieldEnd(); + } } oprot.writeFieldStop(); oprot.writeStructEnd(); @@ -1019,7 +1020,6 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsRequ oprot.writeString(_iter627); } } - oprot.writeString(struct.engine); java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetCatName()) { optionals.set(0); @@ -1027,13 +1027,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsRequ if (struct.isSetValidWriteIdList()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetEngine()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetCatName()) { oprot.writeString(struct.catName); } if (struct.isSetValidWriteIdList()) { oprot.writeString(struct.validWriteIdList); } + if (struct.isSetEngine()) { + oprot.writeString(struct.engine); + } } @Override @@ -1065,9 +1071,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsReque } } struct.setPartNamesIsSet(true); - struct.engine = iprot.readString(); - struct.setEngineIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(2); + java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.catName = iprot.readString(); struct.setCatNameIsSet(true); @@ -1076,6 +1080,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsReque struct.validWriteIdList = iprot.readString(); struct.setValidWriteIdListIsSet(true); } + if (incoming.get(2)) { + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplicationMetricList.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplicationMetricList.java index 891fa2969a98..cbfdcbc01c4f 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplicationMetricList.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplicationMetricList.java @@ -329,14 +329,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ReplicationMetricLi case 1: // REPLICATION_METRIC_LIST if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1512 = iprot.readListBegin(); - struct.replicationMetricList = new java.util.ArrayList(_list1512.size); - @org.apache.thrift.annotation.Nullable ReplicationMetrics _elem1513; - for (int _i1514 = 0; _i1514 < _list1512.size; ++_i1514) + org.apache.thrift.protocol.TList _list1520 = iprot.readListBegin(); + struct.replicationMetricList = new java.util.ArrayList(_list1520.size); + @org.apache.thrift.annotation.Nullable ReplicationMetrics _elem1521; + for (int _i1522 = 0; _i1522 < _list1520.size; ++_i1522) { - _elem1513 = new ReplicationMetrics(); - _elem1513.read(iprot); - struct.replicationMetricList.add(_elem1513); + _elem1521 = new ReplicationMetrics(); + _elem1521.read(iprot); + struct.replicationMetricList.add(_elem1521); } 
iprot.readListEnd(); } @@ -362,9 +362,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ReplicationMetricL oprot.writeFieldBegin(REPLICATION_METRIC_LIST_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.replicationMetricList.size())); - for (ReplicationMetrics _iter1515 : struct.replicationMetricList) + for (ReplicationMetrics _iter1523 : struct.replicationMetricList) { - _iter1515.write(oprot); + _iter1523.write(oprot); } oprot.writeListEnd(); } @@ -389,9 +389,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ReplicationMetricLi org.apache.thrift.protocol.TTupleProtocol oprot = (org.apache.thrift.protocol.TTupleProtocol) prot; { oprot.writeI32(struct.replicationMetricList.size()); - for (ReplicationMetrics _iter1516 : struct.replicationMetricList) + for (ReplicationMetrics _iter1524 : struct.replicationMetricList) { - _iter1516.write(oprot); + _iter1524.write(oprot); } } } @@ -400,14 +400,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ReplicationMetricLi public void read(org.apache.thrift.protocol.TProtocol prot, ReplicationMetricList struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TTupleProtocol iprot = (org.apache.thrift.protocol.TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list1517 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.replicationMetricList = new java.util.ArrayList(_list1517.size); - @org.apache.thrift.annotation.Nullable ReplicationMetrics _elem1518; - for (int _i1519 = 0; _i1519 < _list1517.size; ++_i1519) + org.apache.thrift.protocol.TList _list1525 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.replicationMetricList = new java.util.ArrayList(_list1525.size); + @org.apache.thrift.annotation.Nullable ReplicationMetrics _elem1526; + for (int _i1527 = 0; _i1527 < _list1525.size; ++_i1527) { - _elem1518 = new ReplicationMetrics(); - _elem1518.read(iprot); - struct.replicationMetricList.add(_elem1518); + _elem1526 = new ReplicationMetrics(); + _elem1526.read(iprot); + struct.replicationMetricList.add(_elem1526); } } struct.setReplicationMetricListIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java index 1c373d92da2e..c10e726c6e70 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java @@ -24,7 +24,7 @@ private boolean needMerge; // optional private long writeId; // optional private @org.apache.thrift.annotation.Nullable java.lang.String validWriteIdList; // optional - private @org.apache.thrift.annotation.Nullable java.lang.String engine; // required + private @org.apache.thrift.annotation.Nullable java.lang.String engine; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -102,7 +102,7 @@ public java.lang.String getFieldName() { private static final int __NEEDMERGE_ISSET_ID = 0; private static final int __WRITEID_ISSET_ID = 1; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.NEED_MERGE,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST}; + private static final _Fields optionals[] = {_Fields.NEED_MERGE,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.ENGINE}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -115,7 +115,7 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.ENGINE, new org.apache.thrift.meta_data.FieldMetaData("engine", org.apache.thrift.TFieldRequirementType.REQUIRED, + tmpMap.put(_Fields.ENGINE, new org.apache.thrift.meta_data.FieldMetaData("engine", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); metaDataMap = java.util.Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SetPartitionsStatsRequest.class, metaDataMap); @@ -124,15 +124,15 @@ public java.lang.String getFieldName() { public SetPartitionsStatsRequest() { this.writeId = -1L; + this.engine = "hive"; + } public SetPartitionsStatsRequest( - java.util.List colStats, - java.lang.String engine) + java.util.List colStats) { this(); this.colStats = colStats; - this.engine = engine; } /** @@ -169,7 +169,8 @@ public void clear() { this.writeId = -1L; this.validWriteIdList = null; - this.engine = null; + this.engine = "hive"; + } public int getColStatsSize() { @@ -588,14 +589,16 @@ public java.lang.String toString() { } first = false; } - if (!first) sb.append(", "); - sb.append("engine:"); - if (this.engine == null) { - sb.append("null"); - } else { - sb.append(this.engine); + if (isSetEngine()) { + if (!first) sb.append(", "); + sb.append("engine:"); + if (this.engine == null) { + sb.append("null"); + } else { + sb.append(this.engine); + } + first = false; } - first = false; sb.append(")"); return sb.toString(); } @@ -606,10 +609,6 @@ public void validate() throws org.apache.thrift.TException { throw new org.apache.thrift.protocol.TProtocolException("Required field 'colStats' is unset! Struct:" + toString()); } - if (!isSetEngine()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'engine' is unset! 
Struct:" + toString()); - } - // check for sub-struct validity } @@ -743,9 +742,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SetPartitionsStats } } if (struct.engine != null) { - oprot.writeFieldBegin(ENGINE_FIELD_DESC); - oprot.writeString(struct.engine); - oprot.writeFieldEnd(); + if (struct.isSetEngine()) { + oprot.writeFieldBegin(ENGINE_FIELD_DESC); + oprot.writeString(struct.engine); + oprot.writeFieldEnd(); + } } oprot.writeFieldStop(); oprot.writeStructEnd(); @@ -771,7 +772,6 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsR _iter450.write(oprot); } } - oprot.writeString(struct.engine); java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetNeedMerge()) { optionals.set(0); @@ -782,7 +782,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsR if (struct.isSetValidWriteIdList()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetEngine()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetNeedMerge()) { oprot.writeBool(struct.needMerge); } @@ -792,6 +795,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsR if (struct.isSetValidWriteIdList()) { oprot.writeString(struct.validWriteIdList); } + if (struct.isSetEngine()) { + oprot.writeString(struct.engine); + } } @Override @@ -809,9 +815,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsRe } } struct.setColStatsIsSet(true); - struct.engine = iprot.readString(); - struct.setEngineIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(3); + java.util.BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.needMerge = iprot.readBool(); struct.setNeedMergeIsSet(true); @@ -824,6 +828,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsRe struct.validWriteIdList = iprot.readString(); struct.setValidWriteIdListIsSet(true); } + if (incoming.get(3)) { + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); + } } } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java index ea6626b78feb..685fe9d6982e 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java @@ -27,7 +27,7 @@ private @org.apache.thrift.annotation.Nullable java.util.List colNames; // required private @org.apache.thrift.annotation.Nullable java.lang.String catName; // optional private @org.apache.thrift.annotation.Nullable java.lang.String validWriteIdList; // optional - private @org.apache.thrift.annotation.Nullable java.lang.String engine; // required + private @org.apache.thrift.annotation.Nullable java.lang.String engine; // optional private long id; // optional /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ @@ -111,7 +111,7 @@ public java.lang.String getFieldName() { // isset id assignments private static final int __ID_ISSET_ID = 0; private byte __isset_bitfield = 0; - private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST,_Fields.ID}; + private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.VALID_WRITE_ID_LIST,_Fields.ENGINE,_Fields.ID}; public static final java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { java.util.Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new java.util.EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -126,7 +126,7 @@ public java.lang.String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.ENGINE, new org.apache.thrift.meta_data.FieldMetaData("engine", org.apache.thrift.TFieldRequirementType.REQUIRED, + tmpMap.put(_Fields.ENGINE, new org.apache.thrift.meta_data.FieldMetaData("engine", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))); @@ -135,6 +135,8 @@ public java.lang.String getFieldName() { } public TableStatsRequest() { + this.engine = "hive"; + this.id = -1L; } @@ -142,14 +144,12 @@ public TableStatsRequest() { public TableStatsRequest( java.lang.String dbName, java.lang.String tblName, - java.util.List colNames, - java.lang.String engine) + java.util.List colNames) { this(); this.dbName = dbName; this.tblName = tblName; this.colNames = colNames; - this.engine = engine; } /** @@ -190,7 +190,8 @@ public void clear() { this.colNames = null; this.catName = null; this.validWriteIdList = null; - this.engine = null; + this.engine = "hive"; + this.id = -1L; } @@ -747,14 +748,16 @@ public java.lang.String toString() { } first = false; } - if (!first) sb.append(", "); - sb.append("engine:"); - if (this.engine == null) { - sb.append("null"); - } else { - sb.append(this.engine); + if (isSetEngine()) { + if (!first) sb.append(", "); + sb.append("engine:"); + if (this.engine == null) { + sb.append("null"); + } else { + sb.append(this.engine); + } + first = false; } - first = false; if (isSetId()) { if (!first) sb.append(", "); sb.append("id:"); @@ -779,10 +782,6 @@ public void validate() throws org.apache.thrift.TException { throw new org.apache.thrift.protocol.TProtocolException("Required field 'colNames' is unset! Struct:" + toString()); } - if (!isSetEngine()) { - throw new org.apache.thrift.protocol.TProtocolException("Required field 'engine' is unset! 
Struct:" + toString()); - } - // check for sub-struct validity } @@ -938,9 +937,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsRequest } } if (struct.engine != null) { - oprot.writeFieldBegin(ENGINE_FIELD_DESC); - oprot.writeString(struct.engine); - oprot.writeFieldEnd(); + if (struct.isSetEngine()) { + oprot.writeFieldBegin(ENGINE_FIELD_DESC); + oprot.writeString(struct.engine); + oprot.writeFieldEnd(); + } } if (struct.isSetId()) { oprot.writeFieldBegin(ID_FIELD_DESC); @@ -973,7 +974,6 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest s oprot.writeString(_iter614); } } - oprot.writeString(struct.engine); java.util.BitSet optionals = new java.util.BitSet(); if (struct.isSetCatName()) { optionals.set(0); @@ -981,16 +981,22 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest s if (struct.isSetValidWriteIdList()) { optionals.set(1); } - if (struct.isSetId()) { + if (struct.isSetEngine()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetId()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); if (struct.isSetCatName()) { oprot.writeString(struct.catName); } if (struct.isSetValidWriteIdList()) { oprot.writeString(struct.validWriteIdList); } + if (struct.isSetEngine()) { + oprot.writeString(struct.engine); + } if (struct.isSetId()) { oprot.writeI64(struct.id); } @@ -1014,9 +1020,7 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest st } } struct.setColNamesIsSet(true); - struct.engine = iprot.readString(); - struct.setEngineIsSet(true); - java.util.BitSet incoming = iprot.readBitSet(3); + java.util.BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { struct.catName = iprot.readString(); struct.setCatNameIsSet(true); @@ -1026,6 +1030,10 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest st struct.setValidWriteIdListIsSet(true); } if (incoming.get(2)) { + struct.engine = iprot.readString(); + struct.setEngineIsSet(true); + } + if (incoming.get(3)) { struct.id = iprot.readI64(); struct.setIdIsSet(true); } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index eb9b6dc553de..57799851969b 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -58665,13 +58665,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1528 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1528.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1529; - for (int _i1530 = 0; _i1530 < _list1528.size; ++_i1530) + org.apache.thrift.protocol.TList _list1536 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1536.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1537; + for (int _i1538 = 0; _i1538 < _list1536.size; ++_i1538) { - _elem1529 = iprot.readString(); - struct.success.add(_elem1529); + _elem1537 = iprot.readString(); + struct.success.add(_elem1537); } 
iprot.readListEnd(); } @@ -58706,9 +58706,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter1531 : struct.success) + for (java.lang.String _iter1539 : struct.success) { - oprot.writeString(_iter1531); + oprot.writeString(_iter1539); } oprot.writeListEnd(); } @@ -58747,9 +58747,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter1532 : struct.success) + for (java.lang.String _iter1540 : struct.success) { - oprot.writeString(_iter1532); + oprot.writeString(_iter1540); } } } @@ -58764,13 +58764,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1533 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list1533.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1534; - for (int _i1535 = 0; _i1535 < _list1533.size; ++_i1535) + org.apache.thrift.protocol.TList _list1541 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list1541.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1542; + for (int _i1543 = 0; _i1543 < _list1541.size; ++_i1543) { - _elem1534 = iprot.readString(); - struct.success.add(_elem1534); + _elem1542 = iprot.readString(); + struct.success.add(_elem1542); } } struct.setSuccessIsSet(true); @@ -59433,13 +59433,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1536 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1536.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1537; - for (int _i1538 = 0; _i1538 < _list1536.size; ++_i1538) + org.apache.thrift.protocol.TList _list1544 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1544.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1545; + for (int _i1546 = 0; _i1546 < _list1544.size; ++_i1546) { - _elem1537 = iprot.readString(); - struct.success.add(_elem1537); + _elem1545 = iprot.readString(); + struct.success.add(_elem1545); } iprot.readListEnd(); } @@ -59474,9 +59474,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter1539 : struct.success) + for (java.lang.String _iter1547 : struct.success) { - oprot.writeString(_iter1539); + oprot.writeString(_iter1547); } oprot.writeListEnd(); } @@ -59515,9 +59515,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter1540 : struct.success) + for (java.lang.String _iter1548 : struct.success) { - oprot.writeString(_iter1540); + oprot.writeString(_iter1548); } } } @@ -59532,13 +59532,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1541 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list1541.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1542; - for (int _i1543 = 0; _i1543 < _list1541.size; ++_i1543) + org.apache.thrift.protocol.TList _list1549 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list1549.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1550; + for (int _i1551 = 0; _i1551 < _list1549.size; ++_i1551) { - _elem1542 = iprot.readString(); - struct.success.add(_elem1542); + _elem1550 = iprot.readString(); + struct.success.add(_elem1550); } } struct.setSuccessIsSet(true); @@ -64163,13 +64163,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_dataconnectors_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1544 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1544.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1545; - for (int _i1546 = 0; _i1546 < _list1544.size; ++_i1546) + org.apache.thrift.protocol.TList _list1552 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1552.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1553; + for (int _i1554 = 0; _i1554 < _list1552.size; ++_i1554) { - _elem1545 = iprot.readString(); - struct.success.add(_elem1545); + _elem1553 = iprot.readString(); + struct.success.add(_elem1553); } iprot.readListEnd(); } @@ -64204,9 +64204,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_dataconnectors oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter1547 : struct.success) + for (java.lang.String _iter1555 : struct.success) { - oprot.writeString(_iter1547); + oprot.writeString(_iter1555); } oprot.writeListEnd(); } @@ -64245,9 +64245,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_dataconnectors_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter1548 : struct.success) + for (java.lang.String _iter1556 : struct.success) { - oprot.writeString(_iter1548); + oprot.writeString(_iter1556); } } } @@ -64262,13 +64262,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_dataconnectors_r java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1549 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list1549.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1550; - for (int _i1551 = 0; _i1551 < _list1549.size; ++_i1551) + org.apache.thrift.protocol.TList _list1557 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list1557.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1558; + for (int _i1559 = 0; _i1559 < _list1557.size; ++_i1559) { - _elem1550 = iprot.readString(); - struct.success.add(_elem1550); + _elem1558 = iprot.readString(); + struct.success.add(_elem1558); } } struct.setSuccessIsSet(true); @@ -68909,16 
+68909,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1552 = iprot.readMapBegin(); - struct.success = new java.util.HashMap(2*_map1552.size); - @org.apache.thrift.annotation.Nullable java.lang.String _key1553; - @org.apache.thrift.annotation.Nullable Type _val1554; - for (int _i1555 = 0; _i1555 < _map1552.size; ++_i1555) + org.apache.thrift.protocol.TMap _map1560 = iprot.readMapBegin(); + struct.success = new java.util.HashMap(2*_map1560.size); + @org.apache.thrift.annotation.Nullable java.lang.String _key1561; + @org.apache.thrift.annotation.Nullable Type _val1562; + for (int _i1563 = 0; _i1563 < _map1560.size; ++_i1563) { - _key1553 = iprot.readString(); - _val1554 = new Type(); - _val1554.read(iprot); - struct.success.put(_key1553, _val1554); + _key1561 = iprot.readString(); + _val1562 = new Type(); + _val1562.read(iprot); + struct.success.put(_key1561, _val1562); } iprot.readMapEnd(); } @@ -68953,10 +68953,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (java.util.Map.Entry _iter1556 : struct.success.entrySet()) + for (java.util.Map.Entry _iter1564 : struct.success.entrySet()) { - oprot.writeString(_iter1556.getKey()); - _iter1556.getValue().write(oprot); + oprot.writeString(_iter1564.getKey()); + _iter1564.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -68995,10 +68995,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.util.Map.Entry _iter1557 : struct.success.entrySet()) + for (java.util.Map.Entry _iter1565 : struct.success.entrySet()) { - oprot.writeString(_iter1557.getKey()); - _iter1557.getValue().write(oprot); + oprot.writeString(_iter1565.getKey()); + _iter1565.getValue().write(oprot); } } } @@ -69013,16 +69013,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1558 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.HashMap(2*_map1558.size); - @org.apache.thrift.annotation.Nullable java.lang.String _key1559; - @org.apache.thrift.annotation.Nullable Type _val1560; - for (int _i1561 = 0; _i1561 < _map1558.size; ++_i1561) + org.apache.thrift.protocol.TMap _map1566 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.HashMap(2*_map1566.size); + @org.apache.thrift.annotation.Nullable java.lang.String _key1567; + @org.apache.thrift.annotation.Nullable Type _val1568; + for (int _i1569 = 0; _i1569 < _map1566.size; ++_i1569) { - _key1559 = iprot.readString(); - _val1560 = new Type(); - _val1560.read(iprot); - struct.success.put(_key1559, _val1560); + _key1567 = iprot.readString(); + _val1568 = new Type(); + _val1568.read(iprot); + struct.success.put(_key1567, _val1568); } } struct.setSuccessIsSet(true); @@ -70066,14 +70066,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type 
== org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1562 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1562.size); - @org.apache.thrift.annotation.Nullable FieldSchema _elem1563; - for (int _i1564 = 0; _i1564 < _list1562.size; ++_i1564) + org.apache.thrift.protocol.TList _list1570 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1570.size); + @org.apache.thrift.annotation.Nullable FieldSchema _elem1571; + for (int _i1572 = 0; _i1572 < _list1570.size; ++_i1572) { - _elem1563 = new FieldSchema(); - _elem1563.read(iprot); - struct.success.add(_elem1563); + _elem1571 = new FieldSchema(); + _elem1571.read(iprot); + struct.success.add(_elem1571); } iprot.readListEnd(); } @@ -70126,9 +70126,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1565 : struct.success) + for (FieldSchema _iter1573 : struct.success) { - _iter1565.write(oprot); + _iter1573.write(oprot); } oprot.writeListEnd(); } @@ -70183,9 +70183,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1566 : struct.success) + for (FieldSchema _iter1574 : struct.success) { - _iter1566.write(oprot); + _iter1574.write(oprot); } } } @@ -70206,14 +70206,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st java.util.BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1567 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list1567.size); - @org.apache.thrift.annotation.Nullable FieldSchema _elem1568; - for (int _i1569 = 0; _i1569 < _list1567.size; ++_i1569) + org.apache.thrift.protocol.TList _list1575 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list1575.size); + @org.apache.thrift.annotation.Nullable FieldSchema _elem1576; + for (int _i1577 = 0; _i1577 < _list1575.size; ++_i1577) { - _elem1568 = new FieldSchema(); - _elem1568.read(iprot); - struct.success.add(_elem1568); + _elem1576 = new FieldSchema(); + _elem1576.read(iprot); + struct.success.add(_elem1576); } } struct.setSuccessIsSet(true); @@ -71376,14 +71376,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1570 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1570.size); - @org.apache.thrift.annotation.Nullable FieldSchema _elem1571; - for (int _i1572 = 0; _i1572 < _list1570.size; ++_i1572) + org.apache.thrift.protocol.TList _list1578 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1578.size); + @org.apache.thrift.annotation.Nullable FieldSchema _elem1579; + for (int _i1580 = 0; _i1580 < _list1578.size; ++_i1580) { - _elem1571 = new FieldSchema(); - _elem1571.read(iprot); - struct.success.add(_elem1571); + _elem1579 = new FieldSchema(); + _elem1579.read(iprot); + struct.success.add(_elem1579); } iprot.readListEnd(); } @@ -71436,9 +71436,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1573 : struct.success) + for (FieldSchema _iter1581 : struct.success) { - _iter1573.write(oprot); + _iter1581.write(oprot); } oprot.writeListEnd(); } @@ -71493,9 +71493,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1574 : struct.success) + for (FieldSchema _iter1582 : struct.success) { - _iter1574.write(oprot); + _iter1582.write(oprot); } } } @@ -71516,14 +71516,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi java.util.BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1575 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list1575.size); - @org.apache.thrift.annotation.Nullable FieldSchema _elem1576; - for (int _i1577 = 0; _i1577 < _list1575.size; ++_i1577) + org.apache.thrift.protocol.TList _list1583 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list1583.size); + @org.apache.thrift.annotation.Nullable FieldSchema _elem1584; + for (int _i1585 = 0; _i1585 < _list1583.size; ++_i1585) { - _elem1576 = new FieldSchema(); - _elem1576.read(iprot); - struct.success.add(_elem1576); + _elem1584 = new FieldSchema(); + _elem1584.read(iprot); + struct.success.add(_elem1584); } } struct.setSuccessIsSet(true); @@ -73629,14 +73629,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1578 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1578.size); - @org.apache.thrift.annotation.Nullable FieldSchema _elem1579; - for (int _i1580 = 0; _i1580 < _list1578.size; ++_i1580) + org.apache.thrift.protocol.TList _list1586 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1586.size); + @org.apache.thrift.annotation.Nullable FieldSchema _elem1587; + for (int _i1588 = 0; _i1588 < _list1586.size; ++_i1588) { - _elem1579 = new FieldSchema(); - _elem1579.read(iprot); - struct.success.add(_elem1579); + _elem1587 = new FieldSchema(); + _elem1587.read(iprot); + struct.success.add(_elem1587); } iprot.readListEnd(); } @@ -73689,9 +73689,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1581 : struct.success) + for (FieldSchema _iter1589 : struct.success) { - _iter1581.write(oprot); + _iter1589.write(oprot); } oprot.writeListEnd(); } @@ -73746,9 +73746,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1582 : struct.success) + for (FieldSchema _iter1590 : struct.success) { - _iter1582.write(oprot); + _iter1590.write(oprot); } } } @@ -73769,14 +73769,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st java.util.BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList 
_list1583 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list1583.size); - @org.apache.thrift.annotation.Nullable FieldSchema _elem1584; - for (int _i1585 = 0; _i1585 < _list1583.size; ++_i1585) + org.apache.thrift.protocol.TList _list1591 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list1591.size); + @org.apache.thrift.annotation.Nullable FieldSchema _elem1592; + for (int _i1593 = 0; _i1593 < _list1591.size; ++_i1593) { - _elem1584 = new FieldSchema(); - _elem1584.read(iprot); - struct.success.add(_elem1584); + _elem1592 = new FieldSchema(); + _elem1592.read(iprot); + struct.success.add(_elem1592); } } struct.setSuccessIsSet(true); @@ -74939,14 +74939,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1586 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1586.size); - @org.apache.thrift.annotation.Nullable FieldSchema _elem1587; - for (int _i1588 = 0; _i1588 < _list1586.size; ++_i1588) + org.apache.thrift.protocol.TList _list1594 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1594.size); + @org.apache.thrift.annotation.Nullable FieldSchema _elem1595; + for (int _i1596 = 0; _i1596 < _list1594.size; ++_i1596) { - _elem1587 = new FieldSchema(); - _elem1587.read(iprot); - struct.success.add(_elem1587); + _elem1595 = new FieldSchema(); + _elem1595.read(iprot); + struct.success.add(_elem1595); } iprot.readListEnd(); } @@ -74999,9 +74999,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter1589 : struct.success) + for (FieldSchema _iter1597 : struct.success) { - _iter1589.write(oprot); + _iter1597.write(oprot); } oprot.writeListEnd(); } @@ -75056,9 +75056,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter1590 : struct.success) + for (FieldSchema _iter1598 : struct.success) { - _iter1590.write(oprot); + _iter1598.write(oprot); } } } @@ -75079,14 +75079,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi java.util.BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1591 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list1591.size); - @org.apache.thrift.annotation.Nullable FieldSchema _elem1592; - for (int _i1593 = 0; _i1593 < _list1591.size; ++_i1593) + org.apache.thrift.protocol.TList _list1599 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list1599.size); + @org.apache.thrift.annotation.Nullable FieldSchema _elem1600; + for (int _i1601 = 0; _i1601 < _list1599.size; ++_i1601) { - _elem1592 = new FieldSchema(); - _elem1592.read(iprot); - struct.success.add(_elem1592); + _elem1600 = new FieldSchema(); + _elem1600.read(iprot); + struct.success.add(_elem1600); } } struct.setSuccessIsSet(true); @@ -79293,14 +79293,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 2: // 
PRIMARY_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1594 = iprot.readListBegin(); - struct.primaryKeys = new java.util.ArrayList(_list1594.size); - @org.apache.thrift.annotation.Nullable SQLPrimaryKey _elem1595; - for (int _i1596 = 0; _i1596 < _list1594.size; ++_i1596) + org.apache.thrift.protocol.TList _list1602 = iprot.readListBegin(); + struct.primaryKeys = new java.util.ArrayList(_list1602.size); + @org.apache.thrift.annotation.Nullable SQLPrimaryKey _elem1603; + for (int _i1604 = 0; _i1604 < _list1602.size; ++_i1604) { - _elem1595 = new SQLPrimaryKey(); - _elem1595.read(iprot); - struct.primaryKeys.add(_elem1595); + _elem1603 = new SQLPrimaryKey(); + _elem1603.read(iprot); + struct.primaryKeys.add(_elem1603); } iprot.readListEnd(); } @@ -79312,14 +79312,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 3: // FOREIGN_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1597 = iprot.readListBegin(); - struct.foreignKeys = new java.util.ArrayList(_list1597.size); - @org.apache.thrift.annotation.Nullable SQLForeignKey _elem1598; - for (int _i1599 = 0; _i1599 < _list1597.size; ++_i1599) + org.apache.thrift.protocol.TList _list1605 = iprot.readListBegin(); + struct.foreignKeys = new java.util.ArrayList(_list1605.size); + @org.apache.thrift.annotation.Nullable SQLForeignKey _elem1606; + for (int _i1607 = 0; _i1607 < _list1605.size; ++_i1607) { - _elem1598 = new SQLForeignKey(); - _elem1598.read(iprot); - struct.foreignKeys.add(_elem1598); + _elem1606 = new SQLForeignKey(); + _elem1606.read(iprot); + struct.foreignKeys.add(_elem1606); } iprot.readListEnd(); } @@ -79331,14 +79331,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 4: // UNIQUE_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1600 = iprot.readListBegin(); - struct.uniqueConstraints = new java.util.ArrayList(_list1600.size); - @org.apache.thrift.annotation.Nullable SQLUniqueConstraint _elem1601; - for (int _i1602 = 0; _i1602 < _list1600.size; ++_i1602) + org.apache.thrift.protocol.TList _list1608 = iprot.readListBegin(); + struct.uniqueConstraints = new java.util.ArrayList(_list1608.size); + @org.apache.thrift.annotation.Nullable SQLUniqueConstraint _elem1609; + for (int _i1610 = 0; _i1610 < _list1608.size; ++_i1610) { - _elem1601 = new SQLUniqueConstraint(); - _elem1601.read(iprot); - struct.uniqueConstraints.add(_elem1601); + _elem1609 = new SQLUniqueConstraint(); + _elem1609.read(iprot); + struct.uniqueConstraints.add(_elem1609); } iprot.readListEnd(); } @@ -79350,14 +79350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 5: // NOT_NULL_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1603 = iprot.readListBegin(); - struct.notNullConstraints = new java.util.ArrayList(_list1603.size); - @org.apache.thrift.annotation.Nullable SQLNotNullConstraint _elem1604; - for (int _i1605 = 0; _i1605 < _list1603.size; ++_i1605) + org.apache.thrift.protocol.TList _list1611 = iprot.readListBegin(); + struct.notNullConstraints = new java.util.ArrayList(_list1611.size); + @org.apache.thrift.annotation.Nullable SQLNotNullConstraint _elem1612; + for (int _i1613 = 0; _i1613 < _list1611.size; ++_i1613) { - _elem1604 = new SQLNotNullConstraint(); - 
_elem1604.read(iprot); - struct.notNullConstraints.add(_elem1604); + _elem1612 = new SQLNotNullConstraint(); + _elem1612.read(iprot); + struct.notNullConstraints.add(_elem1612); } iprot.readListEnd(); } @@ -79369,14 +79369,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 6: // DEFAULT_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1606 = iprot.readListBegin(); - struct.defaultConstraints = new java.util.ArrayList(_list1606.size); - @org.apache.thrift.annotation.Nullable SQLDefaultConstraint _elem1607; - for (int _i1608 = 0; _i1608 < _list1606.size; ++_i1608) + org.apache.thrift.protocol.TList _list1614 = iprot.readListBegin(); + struct.defaultConstraints = new java.util.ArrayList(_list1614.size); + @org.apache.thrift.annotation.Nullable SQLDefaultConstraint _elem1615; + for (int _i1616 = 0; _i1616 < _list1614.size; ++_i1616) { - _elem1607 = new SQLDefaultConstraint(); - _elem1607.read(iprot); - struct.defaultConstraints.add(_elem1607); + _elem1615 = new SQLDefaultConstraint(); + _elem1615.read(iprot); + struct.defaultConstraints.add(_elem1615); } iprot.readListEnd(); } @@ -79388,14 +79388,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, create_table_with_c case 7: // CHECK_CONSTRAINTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1609 = iprot.readListBegin(); - struct.checkConstraints = new java.util.ArrayList(_list1609.size); - @org.apache.thrift.annotation.Nullable SQLCheckConstraint _elem1610; - for (int _i1611 = 0; _i1611 < _list1609.size; ++_i1611) + org.apache.thrift.protocol.TList _list1617 = iprot.readListBegin(); + struct.checkConstraints = new java.util.ArrayList(_list1617.size); + @org.apache.thrift.annotation.Nullable SQLCheckConstraint _elem1618; + for (int _i1619 = 0; _i1619 < _list1617.size; ++_i1619) { - _elem1610 = new SQLCheckConstraint(); - _elem1610.read(iprot); - struct.checkConstraints.add(_elem1610); + _elem1618 = new SQLCheckConstraint(); + _elem1618.read(iprot); + struct.checkConstraints.add(_elem1618); } iprot.readListEnd(); } @@ -79426,9 +79426,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); - for (SQLPrimaryKey _iter1612 : struct.primaryKeys) + for (SQLPrimaryKey _iter1620 : struct.primaryKeys) { - _iter1612.write(oprot); + _iter1620.write(oprot); } oprot.writeListEnd(); } @@ -79438,9 +79438,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); - for (SQLForeignKey _iter1613 : struct.foreignKeys) + for (SQLForeignKey _iter1621 : struct.foreignKeys) { - _iter1613.write(oprot); + _iter1621.write(oprot); } oprot.writeListEnd(); } @@ -79450,9 +79450,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(UNIQUE_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraints.size())); - for (SQLUniqueConstraint _iter1614 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1622 : struct.uniqueConstraints) { - 
_iter1614.write(oprot); + _iter1622.write(oprot); } oprot.writeListEnd(); } @@ -79462,9 +79462,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size())); - for (SQLNotNullConstraint _iter1615 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1623 : struct.notNullConstraints) { - _iter1615.write(oprot); + _iter1623.write(oprot); } oprot.writeListEnd(); } @@ -79474,9 +79474,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size())); - for (SQLDefaultConstraint _iter1616 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1624 : struct.defaultConstraints) { - _iter1616.write(oprot); + _iter1624.write(oprot); } oprot.writeListEnd(); } @@ -79486,9 +79486,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, create_table_with_ oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size())); - for (SQLCheckConstraint _iter1617 : struct.checkConstraints) + for (SQLCheckConstraint _iter1625 : struct.checkConstraints) { - _iter1617.write(oprot); + _iter1625.write(oprot); } oprot.writeListEnd(); } @@ -79540,54 +79540,54 @@ public void write(org.apache.thrift.protocol.TProtocol prot, create_table_with_c if (struct.isSetPrimaryKeys()) { { oprot.writeI32(struct.primaryKeys.size()); - for (SQLPrimaryKey _iter1618 : struct.primaryKeys) + for (SQLPrimaryKey _iter1626 : struct.primaryKeys) { - _iter1618.write(oprot); + _iter1626.write(oprot); } } } if (struct.isSetForeignKeys()) { { oprot.writeI32(struct.foreignKeys.size()); - for (SQLForeignKey _iter1619 : struct.foreignKeys) + for (SQLForeignKey _iter1627 : struct.foreignKeys) { - _iter1619.write(oprot); + _iter1627.write(oprot); } } } if (struct.isSetUniqueConstraints()) { { oprot.writeI32(struct.uniqueConstraints.size()); - for (SQLUniqueConstraint _iter1620 : struct.uniqueConstraints) + for (SQLUniqueConstraint _iter1628 : struct.uniqueConstraints) { - _iter1620.write(oprot); + _iter1628.write(oprot); } } } if (struct.isSetNotNullConstraints()) { { oprot.writeI32(struct.notNullConstraints.size()); - for (SQLNotNullConstraint _iter1621 : struct.notNullConstraints) + for (SQLNotNullConstraint _iter1629 : struct.notNullConstraints) { - _iter1621.write(oprot); + _iter1629.write(oprot); } } } if (struct.isSetDefaultConstraints()) { { oprot.writeI32(struct.defaultConstraints.size()); - for (SQLDefaultConstraint _iter1622 : struct.defaultConstraints) + for (SQLDefaultConstraint _iter1630 : struct.defaultConstraints) { - _iter1622.write(oprot); + _iter1630.write(oprot); } } } if (struct.isSetCheckConstraints()) { { oprot.writeI32(struct.checkConstraints.size()); - for (SQLCheckConstraint _iter1623 : struct.checkConstraints) + for (SQLCheckConstraint _iter1631 : struct.checkConstraints) { - _iter1623.write(oprot); + _iter1631.write(oprot); } } } @@ -79604,84 +79604,84 @@ public void read(org.apache.thrift.protocol.TProtocol prot, create_table_with_co } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1624 = 
iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.primaryKeys = new java.util.ArrayList(_list1624.size); - @org.apache.thrift.annotation.Nullable SQLPrimaryKey _elem1625; - for (int _i1626 = 0; _i1626 < _list1624.size; ++_i1626) + org.apache.thrift.protocol.TList _list1632 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.primaryKeys = new java.util.ArrayList(_list1632.size); + @org.apache.thrift.annotation.Nullable SQLPrimaryKey _elem1633; + for (int _i1634 = 0; _i1634 < _list1632.size; ++_i1634) { - _elem1625 = new SQLPrimaryKey(); - _elem1625.read(iprot); - struct.primaryKeys.add(_elem1625); + _elem1633 = new SQLPrimaryKey(); + _elem1633.read(iprot); + struct.primaryKeys.add(_elem1633); } } struct.setPrimaryKeysIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1627 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.foreignKeys = new java.util.ArrayList(_list1627.size); - @org.apache.thrift.annotation.Nullable SQLForeignKey _elem1628; - for (int _i1629 = 0; _i1629 < _list1627.size; ++_i1629) + org.apache.thrift.protocol.TList _list1635 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.foreignKeys = new java.util.ArrayList(_list1635.size); + @org.apache.thrift.annotation.Nullable SQLForeignKey _elem1636; + for (int _i1637 = 0; _i1637 < _list1635.size; ++_i1637) { - _elem1628 = new SQLForeignKey(); - _elem1628.read(iprot); - struct.foreignKeys.add(_elem1628); + _elem1636 = new SQLForeignKey(); + _elem1636.read(iprot); + struct.foreignKeys.add(_elem1636); } } struct.setForeignKeysIsSet(true); } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list1630 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.uniqueConstraints = new java.util.ArrayList(_list1630.size); - @org.apache.thrift.annotation.Nullable SQLUniqueConstraint _elem1631; - for (int _i1632 = 0; _i1632 < _list1630.size; ++_i1632) + org.apache.thrift.protocol.TList _list1638 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.uniqueConstraints = new java.util.ArrayList(_list1638.size); + @org.apache.thrift.annotation.Nullable SQLUniqueConstraint _elem1639; + for (int _i1640 = 0; _i1640 < _list1638.size; ++_i1640) { - _elem1631 = new SQLUniqueConstraint(); - _elem1631.read(iprot); - struct.uniqueConstraints.add(_elem1631); + _elem1639 = new SQLUniqueConstraint(); + _elem1639.read(iprot); + struct.uniqueConstraints.add(_elem1639); } } struct.setUniqueConstraintsIsSet(true); } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1633 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.notNullConstraints = new java.util.ArrayList(_list1633.size); - @org.apache.thrift.annotation.Nullable SQLNotNullConstraint _elem1634; - for (int _i1635 = 0; _i1635 < _list1633.size; ++_i1635) + org.apache.thrift.protocol.TList _list1641 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.notNullConstraints = new java.util.ArrayList(_list1641.size); + @org.apache.thrift.annotation.Nullable SQLNotNullConstraint _elem1642; + for (int _i1643 = 0; _i1643 < _list1641.size; ++_i1643) { - _elem1634 = new SQLNotNullConstraint(); - _elem1634.read(iprot); - struct.notNullConstraints.add(_elem1634); + _elem1642 = new SQLNotNullConstraint(); + _elem1642.read(iprot); + struct.notNullConstraints.add(_elem1642); } } struct.setNotNullConstraintsIsSet(true); } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1636 = 
iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.defaultConstraints = new java.util.ArrayList(_list1636.size); - @org.apache.thrift.annotation.Nullable SQLDefaultConstraint _elem1637; - for (int _i1638 = 0; _i1638 < _list1636.size; ++_i1638) + org.apache.thrift.protocol.TList _list1644 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.defaultConstraints = new java.util.ArrayList(_list1644.size); + @org.apache.thrift.annotation.Nullable SQLDefaultConstraint _elem1645; + for (int _i1646 = 0; _i1646 < _list1644.size; ++_i1646) { - _elem1637 = new SQLDefaultConstraint(); - _elem1637.read(iprot); - struct.defaultConstraints.add(_elem1637); + _elem1645 = new SQLDefaultConstraint(); + _elem1645.read(iprot); + struct.defaultConstraints.add(_elem1645); } } struct.setDefaultConstraintsIsSet(true); } if (incoming.get(6)) { { - org.apache.thrift.protocol.TList _list1639 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.checkConstraints = new java.util.ArrayList(_list1639.size); - @org.apache.thrift.annotation.Nullable SQLCheckConstraint _elem1640; - for (int _i1641 = 0; _i1641 < _list1639.size; ++_i1641) + org.apache.thrift.protocol.TList _list1647 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.checkConstraints = new java.util.ArrayList(_list1647.size); + @org.apache.thrift.annotation.Nullable SQLCheckConstraint _elem1648; + for (int _i1649 = 0; _i1649 < _list1647.size; ++_i1649) { - _elem1640 = new SQLCheckConstraint(); - _elem1640.read(iprot); - struct.checkConstraints.add(_elem1640); + _elem1648 = new SQLCheckConstraint(); + _elem1648.read(iprot); + struct.checkConstraints.add(_elem1648); } } struct.setCheckConstraintsIsSet(true); @@ -91113,13 +91113,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, truncate_table_args case 3: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1642 = iprot.readListBegin(); - struct.partNames = new java.util.ArrayList(_list1642.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1643; - for (int _i1644 = 0; _i1644 < _list1642.size; ++_i1644) + org.apache.thrift.protocol.TList _list1650 = iprot.readListBegin(); + struct.partNames = new java.util.ArrayList(_list1650.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1651; + for (int _i1652 = 0; _i1652 < _list1650.size; ++_i1652) { - _elem1643 = iprot.readString(); - struct.partNames.add(_elem1643); + _elem1651 = iprot.readString(); + struct.partNames.add(_elem1651); } iprot.readListEnd(); } @@ -91155,9 +91155,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, truncate_table_arg oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (java.lang.String _iter1645 : struct.partNames) + for (java.lang.String _iter1653 : struct.partNames) { - oprot.writeString(_iter1645); + oprot.writeString(_iter1653); } oprot.writeListEnd(); } @@ -91200,9 +91200,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, truncate_table_args if (struct.isSetPartNames()) { { oprot.writeI32(struct.partNames.size()); - for (java.lang.String _iter1646 : struct.partNames) + for (java.lang.String _iter1654 : struct.partNames) { - oprot.writeString(_iter1646); + oprot.writeString(_iter1654); } } } @@ -91222,13 +91222,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, 
truncate_table_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1647 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.partNames = new java.util.ArrayList(_list1647.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1648; - for (int _i1649 = 0; _i1649 < _list1647.size; ++_i1649) + org.apache.thrift.protocol.TList _list1655 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.partNames = new java.util.ArrayList(_list1655.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1656; + for (int _i1657 = 0; _i1657 < _list1655.size; ++_i1657) { - _elem1648 = iprot.readString(); - struct.partNames.add(_elem1648); + _elem1656 = iprot.readString(); + struct.partNames.add(_elem1656); } } struct.setPartNamesIsSet(true); @@ -93306,13 +93306,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1650 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1650.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1651; - for (int _i1652 = 0; _i1652 < _list1650.size; ++_i1652) + org.apache.thrift.protocol.TList _list1658 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1658.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1659; + for (int _i1660 = 0; _i1660 < _list1658.size; ++_i1660) { - _elem1651 = iprot.readString(); - struct.success.add(_elem1651); + _elem1659 = iprot.readString(); + struct.success.add(_elem1659); } iprot.readListEnd(); } @@ -93347,9 +93347,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter1653 : struct.success) + for (java.lang.String _iter1661 : struct.success) { - oprot.writeString(_iter1653); + oprot.writeString(_iter1661); } oprot.writeListEnd(); } @@ -93388,9 +93388,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter1654 : struct.success) + for (java.lang.String _iter1662 : struct.success) { - oprot.writeString(_iter1654); + oprot.writeString(_iter1662); } } } @@ -93405,13 +93405,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1655 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list1655.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1656; - for (int _i1657 = 0; _i1657 < _list1655.size; ++_i1657) + org.apache.thrift.protocol.TList _list1663 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list1663.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1664; + for (int _i1665 = 0; _i1665 < _list1663.size; ++_i1665) { - _elem1656 = iprot.readString(); - struct.success.add(_elem1656); + _elem1664 = iprot.readString(); + struct.success.add(_elem1664); } } struct.setSuccessIsSet(true); @@ -94394,13 +94394,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, 
get_tables_by_type_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1658 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1658.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1659; - for (int _i1660 = 0; _i1660 < _list1658.size; ++_i1660) + org.apache.thrift.protocol.TList _list1666 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1666.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1667; + for (int _i1668 = 0; _i1668 < _list1666.size; ++_i1668) { - _elem1659 = iprot.readString(); - struct.success.add(_elem1659); + _elem1667 = iprot.readString(); + struct.success.add(_elem1667); } iprot.readListEnd(); } @@ -94435,9 +94435,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_by_type oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter1661 : struct.success) + for (java.lang.String _iter1669 : struct.success) { - oprot.writeString(_iter1661); + oprot.writeString(_iter1669); } oprot.writeListEnd(); } @@ -94476,9 +94476,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter1662 : struct.success) + for (java.lang.String _iter1670 : struct.success) { - oprot.writeString(_iter1662); + oprot.writeString(_iter1670); } } } @@ -94493,13 +94493,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_by_type_r java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1663 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list1663.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1664; - for (int _i1665 = 0; _i1665 < _list1663.size; ++_i1665) + org.apache.thrift.protocol.TList _list1671 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list1671.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1672; + for (int _i1673 = 0; _i1673 < _list1671.size; ++_i1673) { - _elem1664 = iprot.readString(); - struct.success.add(_elem1664); + _elem1672 = iprot.readString(); + struct.success.add(_elem1672); } } struct.setSuccessIsSet(true); @@ -95165,14 +95165,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_materialize case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1666 = iprot.readListBegin(); - struct.success = new java.util.ArrayList
(_list1666.size); - @org.apache.thrift.annotation.Nullable Table _elem1667; - for (int _i1668 = 0; _i1668 < _list1666.size; ++_i1668) + org.apache.thrift.protocol.TList _list1674 = iprot.readListBegin(); + struct.success = new java.util.ArrayList
(_list1674.size); + @org.apache.thrift.annotation.Nullable Table _elem1675; + for (int _i1676 = 0; _i1676 < _list1674.size; ++_i1676) { - _elem1667 = new Table(); - _elem1667.read(iprot); - struct.success.add(_elem1667); + _elem1675 = new Table(); + _elem1675.read(iprot); + struct.success.add(_elem1675); } iprot.readListEnd(); } @@ -95207,9 +95207,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_materializ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter1669 : struct.success) + for (Table _iter1677 : struct.success) { - _iter1669.write(oprot); + _iter1677.write(oprot); } oprot.writeListEnd(); } @@ -95248,9 +95248,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_materialize if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter1670 : struct.success) + for (Table _iter1678 : struct.success) { - _iter1670.write(oprot); + _iter1678.write(oprot); } } } @@ -95265,14 +95265,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_materialized java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1671 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList
(_list1671.size); - @org.apache.thrift.annotation.Nullable Table _elem1672; - for (int _i1673 = 0; _i1673 < _list1671.size; ++_i1673) + org.apache.thrift.protocol.TList _list1679 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList
(_list1679.size); + @org.apache.thrift.annotation.Nullable Table _elem1680; + for (int _i1681 = 0; _i1681 < _list1679.size; ++_i1681) { - _elem1672 = new Table(); - _elem1672.read(iprot); - struct.success.add(_elem1672); + _elem1680 = new Table(); + _elem1680.read(iprot); + struct.success.add(_elem1680); } } struct.setSuccessIsSet(true); @@ -96047,13 +96047,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_materialized_vi case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1674 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1674.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1675; - for (int _i1676 = 0; _i1676 < _list1674.size; ++_i1676) + org.apache.thrift.protocol.TList _list1682 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1682.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1683; + for (int _i1684 = 0; _i1684 < _list1682.size; ++_i1684) { - _elem1675 = iprot.readString(); - struct.success.add(_elem1675); + _elem1683 = iprot.readString(); + struct.success.add(_elem1683); } iprot.readListEnd(); } @@ -96088,9 +96088,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_materialized_v oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter1677 : struct.success) + for (java.lang.String _iter1685 : struct.success) { - oprot.writeString(_iter1677); + oprot.writeString(_iter1685); } oprot.writeListEnd(); } @@ -96129,9 +96129,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_materialized_vi if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter1678 : struct.success) + for (java.lang.String _iter1686 : struct.success) { - oprot.writeString(_iter1678); + oprot.writeString(_iter1686); } } } @@ -96146,13 +96146,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_materialized_vie java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1679 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list1679.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1680; - for (int _i1681 = 0; _i1681 < _list1679.size; ++_i1681) + org.apache.thrift.protocol.TList _list1687 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list1687.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1688; + for (int _i1689 = 0; _i1689 < _list1687.size; ++_i1689) { - _elem1680 = iprot.readString(); - struct.success.add(_elem1680); + _elem1688 = iprot.readString(); + struct.success.add(_elem1688); } } struct.setSuccessIsSet(true); @@ -96662,13 +96662,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args case 3: // TBL_TYPES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1682 = iprot.readListBegin(); - struct.tbl_types = new java.util.ArrayList(_list1682.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1683; - for (int _i1684 = 0; _i1684 < _list1682.size; ++_i1684) + org.apache.thrift.protocol.TList _list1690 = iprot.readListBegin(); + struct.tbl_types = new 
java.util.ArrayList(_list1690.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1691; + for (int _i1692 = 0; _i1692 < _list1690.size; ++_i1692) { - _elem1683 = iprot.readString(); - struct.tbl_types.add(_elem1683); + _elem1691 = iprot.readString(); + struct.tbl_types.add(_elem1691); } iprot.readListEnd(); } @@ -96704,9 +96704,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_arg oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); - for (java.lang.String _iter1685 : struct.tbl_types) + for (java.lang.String _iter1693 : struct.tbl_types) { - oprot.writeString(_iter1685); + oprot.writeString(_iter1693); } oprot.writeListEnd(); } @@ -96749,9 +96749,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args if (struct.isSetTbl_types()) { { oprot.writeI32(struct.tbl_types.size()); - for (java.lang.String _iter1686 : struct.tbl_types) + for (java.lang.String _iter1694 : struct.tbl_types) { - oprot.writeString(_iter1686); + oprot.writeString(_iter1694); } } } @@ -96771,13 +96771,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1687 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.tbl_types = new java.util.ArrayList(_list1687.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1688; - for (int _i1689 = 0; _i1689 < _list1687.size; ++_i1689) + org.apache.thrift.protocol.TList _list1695 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.tbl_types = new java.util.ArrayList(_list1695.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1696; + for (int _i1697 = 0; _i1697 < _list1695.size; ++_i1697) { - _elem1688 = iprot.readString(); - struct.tbl_types.add(_elem1688); + _elem1696 = iprot.readString(); + struct.tbl_types.add(_elem1696); } } struct.setTbl_typesIsSet(true); @@ -97188,14 +97188,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1690 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1690.size); - @org.apache.thrift.annotation.Nullable TableMeta _elem1691; - for (int _i1692 = 0; _i1692 < _list1690.size; ++_i1692) + org.apache.thrift.protocol.TList _list1698 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1698.size); + @org.apache.thrift.annotation.Nullable TableMeta _elem1699; + for (int _i1700 = 0; _i1700 < _list1698.size; ++_i1700) { - _elem1691 = new TableMeta(); - _elem1691.read(iprot); - struct.success.add(_elem1691); + _elem1699 = new TableMeta(); + _elem1699.read(iprot); + struct.success.add(_elem1699); } iprot.readListEnd(); } @@ -97230,9 +97230,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TableMeta _iter1693 : struct.success) + for (TableMeta _iter1701 : struct.success) { - _iter1693.write(oprot); + _iter1701.write(oprot); } oprot.writeListEnd(); } @@ -97271,9 +97271,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu if (struct.isSetSuccess()) { { 
oprot.writeI32(struct.success.size()); - for (TableMeta _iter1694 : struct.success) + for (TableMeta _iter1702 : struct.success) { - _iter1694.write(oprot); + _iter1702.write(oprot); } } } @@ -97288,14 +97288,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1695 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list1695.size); - @org.apache.thrift.annotation.Nullable TableMeta _elem1696; - for (int _i1697 = 0; _i1697 < _list1695.size; ++_i1697) + org.apache.thrift.protocol.TList _list1703 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list1703.size); + @org.apache.thrift.annotation.Nullable TableMeta _elem1704; + for (int _i1705 = 0; _i1705 < _list1703.size; ++_i1705) { - _elem1696 = new TableMeta(); - _elem1696.read(iprot); - struct.success.add(_elem1696); + _elem1704 = new TableMeta(); + _elem1704.read(iprot); + struct.success.add(_elem1704); } } struct.setSuccessIsSet(true); @@ -98070,13 +98070,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1698 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1698.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1699; - for (int _i1700 = 0; _i1700 < _list1698.size; ++_i1700) + org.apache.thrift.protocol.TList _list1706 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1706.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1707; + for (int _i1708 = 0; _i1708 < _list1706.size; ++_i1708) { - _elem1699 = iprot.readString(); - struct.success.add(_elem1699); + _elem1707 = iprot.readString(); + struct.success.add(_elem1707); } iprot.readListEnd(); } @@ -98111,9 +98111,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter1701 : struct.success) + for (java.lang.String _iter1709 : struct.success) { - oprot.writeString(_iter1701); + oprot.writeString(_iter1709); } oprot.writeListEnd(); } @@ -98152,9 +98152,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter1702 : struct.success) + for (java.lang.String _iter1710 : struct.success) { - oprot.writeString(_iter1702); + oprot.writeString(_iter1710); } } } @@ -98169,13 +98169,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1703 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list1703.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1704; - for (int _i1705 = 0; _i1705 < _list1703.size; ++_i1705) + org.apache.thrift.protocol.TList _list1711 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list1711.size); + @org.apache.thrift.annotation.Nullable java.lang.String 
_elem1712; + for (int _i1713 = 0; _i1713 < _list1711.size; ++_i1713) { - _elem1704 = iprot.readString(); - struct.success.add(_elem1704); + _elem1712 = iprot.readString(); + struct.success.add(_elem1712); } } struct.setSuccessIsSet(true); @@ -99641,13 +99641,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1706 = iprot.readListBegin(); - struct.tbl_names = new java.util.ArrayList(_list1706.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1707; - for (int _i1708 = 0; _i1708 < _list1706.size; ++_i1708) + org.apache.thrift.protocol.TList _list1714 = iprot.readListBegin(); + struct.tbl_names = new java.util.ArrayList(_list1714.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1715; + for (int _i1716 = 0; _i1716 < _list1714.size; ++_i1716) { - _elem1707 = iprot.readString(); - struct.tbl_names.add(_elem1707); + _elem1715 = iprot.readString(); + struct.tbl_names.add(_elem1715); } iprot.readListEnd(); } @@ -99678,9 +99678,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (java.lang.String _iter1709 : struct.tbl_names) + for (java.lang.String _iter1717 : struct.tbl_names) { - oprot.writeString(_iter1709); + oprot.writeString(_iter1717); } oprot.writeListEnd(); } @@ -99717,9 +99717,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (java.lang.String _iter1710 : struct.tbl_names) + for (java.lang.String _iter1718 : struct.tbl_names) { - oprot.writeString(_iter1710); + oprot.writeString(_iter1718); } } } @@ -99735,13 +99735,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1711 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.tbl_names = new java.util.ArrayList(_list1711.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1712; - for (int _i1713 = 0; _i1713 < _list1711.size; ++_i1713) + org.apache.thrift.protocol.TList _list1719 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.tbl_names = new java.util.ArrayList(_list1719.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1720; + for (int _i1721 = 0; _i1721 < _list1719.size; ++_i1721) { - _elem1712 = iprot.readString(); - struct.tbl_names.add(_elem1712); + _elem1720 = iprot.readString(); + struct.tbl_names.add(_elem1720); } } struct.setTbl_namesIsSet(true); @@ -100071,14 +100071,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1714 = iprot.readListBegin(); - struct.success = new java.util.ArrayList
(_list1714.size); - @org.apache.thrift.annotation.Nullable Table _elem1715; - for (int _i1716 = 0; _i1716 < _list1714.size; ++_i1716) + org.apache.thrift.protocol.TList _list1722 = iprot.readListBegin(); + struct.success = new java.util.ArrayList
(_list1722.size); + @org.apache.thrift.annotation.Nullable Table _elem1723; + for (int _i1724 = 0; _i1724 < _list1722.size; ++_i1724) { - _elem1715 = new Table(); - _elem1715.read(iprot); - struct.success.add(_elem1715); + _elem1723 = new Table(); + _elem1723.read(iprot); + struct.success.add(_elem1723); } iprot.readListEnd(); } @@ -100104,9 +100104,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter1717 : struct.success) + for (Table _iter1725 : struct.success) { - _iter1717.write(oprot); + _iter1725.write(oprot); } oprot.writeListEnd(); } @@ -100137,9 +100137,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter1718 : struct.success) + for (Table _iter1726 : struct.success) { - _iter1718.write(oprot); + _iter1726.write(oprot); } } } @@ -100151,14 +100151,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1719 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList
(_list1719.size); - @org.apache.thrift.annotation.Nullable Table _elem1720; - for (int _i1721 = 0; _i1721 < _list1719.size; ++_i1721) + org.apache.thrift.protocol.TList _list1727 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList
(_list1727.size); + @org.apache.thrift.annotation.Nullable Table _elem1728; + for (int _i1729 = 0; _i1729 < _list1727.size; ++_i1729) { - _elem1720 = new Table(); - _elem1720.read(iprot); - struct.success.add(_elem1720); + _elem1728 = new Table(); + _elem1728.read(iprot); + struct.success.add(_elem1728); } } struct.setSuccessIsSet(true); @@ -100936,14 +100936,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_ext_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1722 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1722.size); - @org.apache.thrift.annotation.Nullable ExtendedTableInfo _elem1723; - for (int _i1724 = 0; _i1724 < _list1722.size; ++_i1724) + org.apache.thrift.protocol.TList _list1730 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1730.size); + @org.apache.thrift.annotation.Nullable ExtendedTableInfo _elem1731; + for (int _i1732 = 0; _i1732 < _list1730.size; ++_i1732) { - _elem1723 = new ExtendedTableInfo(); - _elem1723.read(iprot); - struct.success.add(_elem1723); + _elem1731 = new ExtendedTableInfo(); + _elem1731.read(iprot); + struct.success.add(_elem1731); } iprot.readListEnd(); } @@ -100978,9 +100978,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_ext_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (ExtendedTableInfo _iter1725 : struct.success) + for (ExtendedTableInfo _iter1733 : struct.success) { - _iter1725.write(oprot); + _iter1733.write(oprot); } oprot.writeListEnd(); } @@ -101019,9 +101019,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_ext_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (ExtendedTableInfo _iter1726 : struct.success) + for (ExtendedTableInfo _iter1734 : struct.success) { - _iter1726.write(oprot); + _iter1734.write(oprot); } } } @@ -101036,14 +101036,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_ext_resul java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1727 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list1727.size); - @org.apache.thrift.annotation.Nullable ExtendedTableInfo _elem1728; - for (int _i1729 = 0; _i1729 < _list1727.size; ++_i1729) + org.apache.thrift.protocol.TList _list1735 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list1735.size); + @org.apache.thrift.annotation.Nullable ExtendedTableInfo _elem1736; + for (int _i1737 = 0; _i1737 < _list1735.size; ++_i1737) { - _elem1728 = new ExtendedTableInfo(); - _elem1728.read(iprot); - struct.success.add(_elem1728); + _elem1736 = new ExtendedTableInfo(); + _elem1736.read(iprot); + struct.success.add(_elem1736); } } struct.setSuccessIsSet(true); @@ -106594,13 +106594,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1730 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1730.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1731; - for (int _i1732 = 0; _i1732 < _list1730.size; ++_i1732) + 
org.apache.thrift.protocol.TList _list1738 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1738.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1739; + for (int _i1740 = 0; _i1740 < _list1738.size; ++_i1740) { - _elem1731 = iprot.readString(); - struct.success.add(_elem1731); + _elem1739 = iprot.readString(); + struct.success.add(_elem1739); } iprot.readListEnd(); } @@ -106653,9 +106653,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter1733 : struct.success) + for (java.lang.String _iter1741 : struct.success) { - oprot.writeString(_iter1733); + oprot.writeString(_iter1741); } oprot.writeListEnd(); } @@ -106710,9 +106710,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter1734 : struct.success) + for (java.lang.String _iter1742 : struct.success) { - oprot.writeString(_iter1734); + oprot.writeString(_iter1742); } } } @@ -106733,13 +106733,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f java.util.BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1735 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list1735.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1736; - for (int _i1737 = 0; _i1737 < _list1735.size; ++_i1737) + org.apache.thrift.protocol.TList _list1743 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list1743.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1744; + for (int _i1745 = 0; _i1745 < _list1743.size; ++_i1745) { - _elem1736 = iprot.readString(); - struct.success.add(_elem1736); + _elem1744 = iprot.readString(); + struct.success.add(_elem1744); } } struct.setSuccessIsSet(true); @@ -113586,14 +113586,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1738 = iprot.readListBegin(); - struct.new_parts = new java.util.ArrayList(_list1738.size); - @org.apache.thrift.annotation.Nullable Partition _elem1739; - for (int _i1740 = 0; _i1740 < _list1738.size; ++_i1740) + org.apache.thrift.protocol.TList _list1746 = iprot.readListBegin(); + struct.new_parts = new java.util.ArrayList(_list1746.size); + @org.apache.thrift.annotation.Nullable Partition _elem1747; + for (int _i1748 = 0; _i1748 < _list1746.size; ++_i1748) { - _elem1739 = new Partition(); - _elem1739.read(iprot); - struct.new_parts.add(_elem1739); + _elem1747 = new Partition(); + _elem1747.read(iprot); + struct.new_parts.add(_elem1747); } iprot.readListEnd(); } @@ -113619,9 +113619,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1741 : struct.new_parts) + for (Partition _iter1749 : struct.new_parts) { - _iter1741.write(oprot); + _iter1749.write(oprot); } oprot.writeListEnd(); } @@ 
-113652,9 +113652,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1742 : struct.new_parts) + for (Partition _iter1750 : struct.new_parts) { - _iter1742.write(oprot); + _iter1750.write(oprot); } } } @@ -113666,14 +113666,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1743 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.new_parts = new java.util.ArrayList(_list1743.size); - @org.apache.thrift.annotation.Nullable Partition _elem1744; - for (int _i1745 = 0; _i1745 < _list1743.size; ++_i1745) + org.apache.thrift.protocol.TList _list1751 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.new_parts = new java.util.ArrayList(_list1751.size); + @org.apache.thrift.annotation.Nullable Partition _elem1752; + for (int _i1753 = 0; _i1753 < _list1751.size; ++_i1753) { - _elem1744 = new Partition(); - _elem1744.read(iprot); - struct.new_parts.add(_elem1744); + _elem1752 = new Partition(); + _elem1752.read(iprot); + struct.new_parts.add(_elem1752); } } struct.setNew_partsIsSet(true); @@ -114680,14 +114680,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1746 = iprot.readListBegin(); - struct.new_parts = new java.util.ArrayList(_list1746.size); - @org.apache.thrift.annotation.Nullable PartitionSpec _elem1747; - for (int _i1748 = 0; _i1748 < _list1746.size; ++_i1748) + org.apache.thrift.protocol.TList _list1754 = iprot.readListBegin(); + struct.new_parts = new java.util.ArrayList(_list1754.size); + @org.apache.thrift.annotation.Nullable PartitionSpec _elem1755; + for (int _i1756 = 0; _i1756 < _list1754.size; ++_i1756) { - _elem1747 = new PartitionSpec(); - _elem1747.read(iprot); - struct.new_parts.add(_elem1747); + _elem1755 = new PartitionSpec(); + _elem1755.read(iprot); + struct.new_parts.add(_elem1755); } iprot.readListEnd(); } @@ -114713,9 +114713,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter1749 : struct.new_parts) + for (PartitionSpec _iter1757 : struct.new_parts) { - _iter1749.write(oprot); + _iter1757.write(oprot); } oprot.writeListEnd(); } @@ -114746,9 +114746,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter1750 : struct.new_parts) + for (PartitionSpec _iter1758 : struct.new_parts) { - _iter1750.write(oprot); + _iter1758.write(oprot); } } } @@ -114760,14 +114760,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1751 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.new_parts = new java.util.ArrayList(_list1751.size); - @org.apache.thrift.annotation.Nullable PartitionSpec _elem1752; - for (int _i1753 = 0; _i1753 < _list1751.size; ++_i1753) + 
org.apache.thrift.protocol.TList _list1759 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.new_parts = new java.util.ArrayList(_list1759.size); + @org.apache.thrift.annotation.Nullable PartitionSpec _elem1760; + for (int _i1761 = 0; _i1761 < _list1759.size; ++_i1761) { - _elem1752 = new PartitionSpec(); - _elem1752.read(iprot); - struct.new_parts.add(_elem1752); + _elem1760 = new PartitionSpec(); + _elem1760.read(iprot); + struct.new_parts.add(_elem1760); } } struct.setNew_partsIsSet(true); @@ -115949,13 +115949,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1754 = iprot.readListBegin(); - struct.part_vals = new java.util.ArrayList(_list1754.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1755; - for (int _i1756 = 0; _i1756 < _list1754.size; ++_i1756) + org.apache.thrift.protocol.TList _list1762 = iprot.readListBegin(); + struct.part_vals = new java.util.ArrayList(_list1762.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1763; + for (int _i1764 = 0; _i1764 < _list1762.size; ++_i1764) { - _elem1755 = iprot.readString(); - struct.part_vals.add(_elem1755); + _elem1763 = iprot.readString(); + struct.part_vals.add(_elem1763); } iprot.readListEnd(); } @@ -115991,9 +115991,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (java.lang.String _iter1757 : struct.part_vals) + for (java.lang.String _iter1765 : struct.part_vals) { - oprot.writeString(_iter1757); + oprot.writeString(_iter1765); } oprot.writeListEnd(); } @@ -116036,9 +116036,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (java.lang.String _iter1758 : struct.part_vals) + for (java.lang.String _iter1766 : struct.part_vals) { - oprot.writeString(_iter1758); + oprot.writeString(_iter1766); } } } @@ -116058,13 +116058,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1759 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.part_vals = new java.util.ArrayList(_list1759.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1760; - for (int _i1761 = 0; _i1761 < _list1759.size; ++_i1761) + org.apache.thrift.protocol.TList _list1767 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.part_vals = new java.util.ArrayList(_list1767.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1768; + for (int _i1769 = 0; _i1769 < _list1767.size; ++_i1769) { - _elem1760 = iprot.readString(); - struct.part_vals.add(_elem1760); + _elem1768 = iprot.readString(); + struct.part_vals.add(_elem1768); } } struct.setPart_valsIsSet(true); @@ -118390,13 +118390,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1762 = iprot.readListBegin(); - struct.part_vals = new java.util.ArrayList(_list1762.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1763; - for 
(int _i1764 = 0; _i1764 < _list1762.size; ++_i1764) + org.apache.thrift.protocol.TList _list1770 = iprot.readListBegin(); + struct.part_vals = new java.util.ArrayList(_list1770.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1771; + for (int _i1772 = 0; _i1772 < _list1770.size; ++_i1772) { - _elem1763 = iprot.readString(); - struct.part_vals.add(_elem1763); + _elem1771 = iprot.readString(); + struct.part_vals.add(_elem1771); } iprot.readListEnd(); } @@ -118441,9 +118441,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (java.lang.String _iter1765 : struct.part_vals) + for (java.lang.String _iter1773 : struct.part_vals) { - oprot.writeString(_iter1765); + oprot.writeString(_iter1773); } oprot.writeListEnd(); } @@ -118494,9 +118494,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (java.lang.String _iter1766 : struct.part_vals) + for (java.lang.String _iter1774 : struct.part_vals) { - oprot.writeString(_iter1766); + oprot.writeString(_iter1774); } } } @@ -118519,13 +118519,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1767 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.part_vals = new java.util.ArrayList(_list1767.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1768; - for (int _i1769 = 0; _i1769 < _list1767.size; ++_i1769) + org.apache.thrift.protocol.TList _list1775 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.part_vals = new java.util.ArrayList(_list1775.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1776; + for (int _i1777 = 0; _i1777 < _list1775.size; ++_i1777) { - _elem1768 = iprot.readString(); - struct.part_vals.add(_elem1768); + _elem1776 = iprot.readString(); + struct.part_vals.add(_elem1776); } } struct.setPart_valsIsSet(true); @@ -122417,13 +122417,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1770 = iprot.readListBegin(); - struct.part_vals = new java.util.ArrayList(_list1770.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1771; - for (int _i1772 = 0; _i1772 < _list1770.size; ++_i1772) + org.apache.thrift.protocol.TList _list1778 = iprot.readListBegin(); + struct.part_vals = new java.util.ArrayList(_list1778.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1779; + for (int _i1780 = 0; _i1780 < _list1778.size; ++_i1780) { - _elem1771 = iprot.readString(); - struct.part_vals.add(_elem1771); + _elem1779 = iprot.readString(); + struct.part_vals.add(_elem1779); } iprot.readListEnd(); } @@ -122467,9 +122467,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (java.lang.String _iter1773 : struct.part_vals) + for (java.lang.String _iter1781 : struct.part_vals) { - oprot.writeString(_iter1773); + 
oprot.writeString(_iter1781); } oprot.writeListEnd(); } @@ -122518,9 +122518,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (java.lang.String _iter1774 : struct.part_vals) + for (java.lang.String _iter1782 : struct.part_vals) { - oprot.writeString(_iter1774); + oprot.writeString(_iter1782); } } } @@ -122543,13 +122543,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1775 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.part_vals = new java.util.ArrayList(_list1775.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1776; - for (int _i1777 = 0; _i1777 < _list1775.size; ++_i1777) + org.apache.thrift.protocol.TList _list1783 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.part_vals = new java.util.ArrayList(_list1783.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1784; + for (int _i1785 = 0; _i1785 < _list1783.size; ++_i1785) { - _elem1776 = iprot.readString(); - struct.part_vals.add(_elem1776); + _elem1784 = iprot.readString(); + struct.part_vals.add(_elem1784); } } struct.setPart_valsIsSet(true); @@ -123791,13 +123791,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1778 = iprot.readListBegin(); - struct.part_vals = new java.util.ArrayList(_list1778.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1779; - for (int _i1780 = 0; _i1780 < _list1778.size; ++_i1780) + org.apache.thrift.protocol.TList _list1786 = iprot.readListBegin(); + struct.part_vals = new java.util.ArrayList(_list1786.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1787; + for (int _i1788 = 0; _i1788 < _list1786.size; ++_i1788) { - _elem1779 = iprot.readString(); - struct.part_vals.add(_elem1779); + _elem1787 = iprot.readString(); + struct.part_vals.add(_elem1787); } iprot.readListEnd(); } @@ -123850,9 +123850,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (java.lang.String _iter1781 : struct.part_vals) + for (java.lang.String _iter1789 : struct.part_vals) { - oprot.writeString(_iter1781); + oprot.writeString(_iter1789); } oprot.writeListEnd(); } @@ -123909,9 +123909,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (java.lang.String _iter1782 : struct.part_vals) + for (java.lang.String _iter1790 : struct.part_vals) { - oprot.writeString(_iter1782); + oprot.writeString(_iter1790); } } } @@ -123937,13 +123937,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1783 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.part_vals = new java.util.ArrayList(_list1783.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1784; - for (int _i1785 = 0; _i1785 < _list1783.size; ++_i1785) + org.apache.thrift.protocol.TList _list1791 = 
iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.part_vals = new java.util.ArrayList(_list1791.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1792; + for (int _i1793 = 0; _i1793 < _list1791.size; ++_i1793) { - _elem1784 = iprot.readString(); - struct.part_vals.add(_elem1784); + _elem1792 = iprot.readString(); + struct.part_vals.add(_elem1792); } } struct.setPart_valsIsSet(true); @@ -128563,13 +128563,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1786 = iprot.readListBegin(); - struct.part_vals = new java.util.ArrayList(_list1786.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1787; - for (int _i1788 = 0; _i1788 < _list1786.size; ++_i1788) + org.apache.thrift.protocol.TList _list1794 = iprot.readListBegin(); + struct.part_vals = new java.util.ArrayList(_list1794.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1795; + for (int _i1796 = 0; _i1796 < _list1794.size; ++_i1796) { - _elem1787 = iprot.readString(); - struct.part_vals.add(_elem1787); + _elem1795 = iprot.readString(); + struct.part_vals.add(_elem1795); } iprot.readListEnd(); } @@ -128605,9 +128605,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (java.lang.String _iter1789 : struct.part_vals) + for (java.lang.String _iter1797 : struct.part_vals) { - oprot.writeString(_iter1789); + oprot.writeString(_iter1797); } oprot.writeListEnd(); } @@ -128650,9 +128650,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (java.lang.String _iter1790 : struct.part_vals) + for (java.lang.String _iter1798 : struct.part_vals) { - oprot.writeString(_iter1790); + oprot.writeString(_iter1798); } } } @@ -128672,13 +128672,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1791 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.part_vals = new java.util.ArrayList(_list1791.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1792; - for (int _i1793 = 0; _i1793 < _list1791.size; ++_i1793) + org.apache.thrift.protocol.TList _list1799 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.part_vals = new java.util.ArrayList(_list1799.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1800; + for (int _i1801 = 0; _i1801 < _list1799.size; ++_i1801) { - _elem1792 = iprot.readString(); - struct.part_vals.add(_elem1792); + _elem1800 = iprot.readString(); + struct.part_vals.add(_elem1800); } } struct.setPart_valsIsSet(true); @@ -130850,15 +130850,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1794 = iprot.readMapBegin(); - struct.partitionSpecs = new java.util.HashMap(2*_map1794.size); - @org.apache.thrift.annotation.Nullable java.lang.String _key1795; - @org.apache.thrift.annotation.Nullable java.lang.String _val1796; - for (int _i1797 = 0; _i1797 
< _map1794.size; ++_i1797) + org.apache.thrift.protocol.TMap _map1802 = iprot.readMapBegin(); + struct.partitionSpecs = new java.util.HashMap(2*_map1802.size); + @org.apache.thrift.annotation.Nullable java.lang.String _key1803; + @org.apache.thrift.annotation.Nullable java.lang.String _val1804; + for (int _i1805 = 0; _i1805 < _map1802.size; ++_i1805) { - _key1795 = iprot.readString(); - _val1796 = iprot.readString(); - struct.partitionSpecs.put(_key1795, _val1796); + _key1803 = iprot.readString(); + _val1804 = iprot.readString(); + struct.partitionSpecs.put(_key1803, _val1804); } iprot.readMapEnd(); } @@ -130916,10 +130916,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (java.util.Map.Entry _iter1798 : struct.partitionSpecs.entrySet()) + for (java.util.Map.Entry _iter1806 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1798.getKey()); - oprot.writeString(_iter1798.getValue()); + oprot.writeString(_iter1806.getKey()); + oprot.writeString(_iter1806.getValue()); } oprot.writeMapEnd(); } @@ -130982,10 +130982,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (java.util.Map.Entry _iter1799 : struct.partitionSpecs.entrySet()) + for (java.util.Map.Entry _iter1807 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1799.getKey()); - oprot.writeString(_iter1799.getValue()); + oprot.writeString(_iter1807.getKey()); + oprot.writeString(_iter1807.getValue()); } } } @@ -131009,15 +131009,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a java.util.BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1800 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.partitionSpecs = new java.util.HashMap(2*_map1800.size); - @org.apache.thrift.annotation.Nullable java.lang.String _key1801; - @org.apache.thrift.annotation.Nullable java.lang.String _val1802; - for (int _i1803 = 0; _i1803 < _map1800.size; ++_i1803) + org.apache.thrift.protocol.TMap _map1808 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.partitionSpecs = new java.util.HashMap(2*_map1808.size); + @org.apache.thrift.annotation.Nullable java.lang.String _key1809; + @org.apache.thrift.annotation.Nullable java.lang.String _val1810; + for (int _i1811 = 0; _i1811 < _map1808.size; ++_i1811) { - _key1801 = iprot.readString(); - _val1802 = iprot.readString(); - struct.partitionSpecs.put(_key1801, _val1802); + _key1809 = iprot.readString(); + _val1810 = iprot.readString(); + struct.partitionSpecs.put(_key1809, _val1810); } } struct.setPartitionSpecsIsSet(true); @@ -132471,15 +132471,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map1804 = iprot.readMapBegin(); - struct.partitionSpecs = new java.util.HashMap(2*_map1804.size); - @org.apache.thrift.annotation.Nullable java.lang.String _key1805; - @org.apache.thrift.annotation.Nullable java.lang.String _val1806; - for (int _i1807 
= 0; _i1807 < _map1804.size; ++_i1807) + org.apache.thrift.protocol.TMap _map1812 = iprot.readMapBegin(); + struct.partitionSpecs = new java.util.HashMap(2*_map1812.size); + @org.apache.thrift.annotation.Nullable java.lang.String _key1813; + @org.apache.thrift.annotation.Nullable java.lang.String _val1814; + for (int _i1815 = 0; _i1815 < _map1812.size; ++_i1815) { - _key1805 = iprot.readString(); - _val1806 = iprot.readString(); - struct.partitionSpecs.put(_key1805, _val1806); + _key1813 = iprot.readString(); + _val1814 = iprot.readString(); + struct.partitionSpecs.put(_key1813, _val1814); } iprot.readMapEnd(); } @@ -132537,10 +132537,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (java.util.Map.Entry _iter1808 : struct.partitionSpecs.entrySet()) + for (java.util.Map.Entry _iter1816 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1808.getKey()); - oprot.writeString(_iter1808.getValue()); + oprot.writeString(_iter1816.getKey()); + oprot.writeString(_iter1816.getValue()); } oprot.writeMapEnd(); } @@ -132603,10 +132603,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (java.util.Map.Entry _iter1809 : struct.partitionSpecs.entrySet()) + for (java.util.Map.Entry _iter1817 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter1809.getKey()); - oprot.writeString(_iter1809.getValue()); + oprot.writeString(_iter1817.getKey()); + oprot.writeString(_iter1817.getValue()); } } } @@ -132630,15 +132630,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ java.util.BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map1810 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.partitionSpecs = new java.util.HashMap(2*_map1810.size); - @org.apache.thrift.annotation.Nullable java.lang.String _key1811; - @org.apache.thrift.annotation.Nullable java.lang.String _val1812; - for (int _i1813 = 0; _i1813 < _map1810.size; ++_i1813) + org.apache.thrift.protocol.TMap _map1818 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.partitionSpecs = new java.util.HashMap(2*_map1818.size); + @org.apache.thrift.annotation.Nullable java.lang.String _key1819; + @org.apache.thrift.annotation.Nullable java.lang.String _val1820; + for (int _i1821 = 0; _i1821 < _map1818.size; ++_i1821) { - _key1811 = iprot.readString(); - _val1812 = iprot.readString(); - struct.partitionSpecs.put(_key1811, _val1812); + _key1819 = iprot.readString(); + _val1820 = iprot.readString(); + struct.partitionSpecs.put(_key1819, _val1820); } } struct.setPartitionSpecsIsSet(true); @@ -133308,14 +133308,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1814 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1814.size); - @org.apache.thrift.annotation.Nullable Partition _elem1815; - for (int _i1816 = 0; _i1816 < _list1814.size; ++_i1816) + 
org.apache.thrift.protocol.TList _list1822 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1822.size); + @org.apache.thrift.annotation.Nullable Partition _elem1823; + for (int _i1824 = 0; _i1824 < _list1822.size; ++_i1824) { - _elem1815 = new Partition(); - _elem1815.read(iprot); - struct.success.add(_elem1815); + _elem1823 = new Partition(); + _elem1823.read(iprot); + struct.success.add(_elem1823); } iprot.readListEnd(); } @@ -133377,9 +133377,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1817 : struct.success) + for (Partition _iter1825 : struct.success) { - _iter1817.write(oprot); + _iter1825.write(oprot); } oprot.writeListEnd(); } @@ -133442,9 +133442,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1818 : struct.success) + for (Partition _iter1826 : struct.success) { - _iter1818.write(oprot); + _iter1826.write(oprot); } } } @@ -133468,14 +133468,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ java.util.BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1819 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list1819.size); - @org.apache.thrift.annotation.Nullable Partition _elem1820; - for (int _i1821 = 0; _i1821 < _list1819.size; ++_i1821) + org.apache.thrift.protocol.TList _list1827 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list1827.size); + @org.apache.thrift.annotation.Nullable Partition _elem1828; + for (int _i1829 = 0; _i1829 < _list1827.size; ++_i1829) { - _elem1820 = new Partition(); - _elem1820.read(iprot); - struct.success.add(_elem1820); + _elem1828 = new Partition(); + _elem1828.read(iprot); + struct.success.add(_elem1828); } } struct.setSuccessIsSet(true); @@ -134180,13 +134180,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1822 = iprot.readListBegin(); - struct.part_vals = new java.util.ArrayList(_list1822.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1823; - for (int _i1824 = 0; _i1824 < _list1822.size; ++_i1824) + org.apache.thrift.protocol.TList _list1830 = iprot.readListBegin(); + struct.part_vals = new java.util.ArrayList(_list1830.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1831; + for (int _i1832 = 0; _i1832 < _list1830.size; ++_i1832) { - _elem1823 = iprot.readString(); - struct.part_vals.add(_elem1823); + _elem1831 = iprot.readString(); + struct.part_vals.add(_elem1831); } iprot.readListEnd(); } @@ -134206,13 +134206,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1825 = iprot.readListBegin(); - struct.group_names = new java.util.ArrayList(_list1825.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1826; - for (int _i1827 = 0; _i1827 < _list1825.size; ++_i1827) + 
org.apache.thrift.protocol.TList _list1833 = iprot.readListBegin(); + struct.group_names = new java.util.ArrayList(_list1833.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1834; + for (int _i1835 = 0; _i1835 < _list1833.size; ++_i1835) { - _elem1826 = iprot.readString(); - struct.group_names.add(_elem1826); + _elem1834 = iprot.readString(); + struct.group_names.add(_elem1834); } iprot.readListEnd(); } @@ -134248,9 +134248,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (java.lang.String _iter1828 : struct.part_vals) + for (java.lang.String _iter1836 : struct.part_vals) { - oprot.writeString(_iter1828); + oprot.writeString(_iter1836); } oprot.writeListEnd(); } @@ -134265,9 +134265,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (java.lang.String _iter1829 : struct.group_names) + for (java.lang.String _iter1837 : struct.group_names) { - oprot.writeString(_iter1829); + oprot.writeString(_iter1837); } oprot.writeListEnd(); } @@ -134316,9 +134316,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (java.lang.String _iter1830 : struct.part_vals) + for (java.lang.String _iter1838 : struct.part_vals) { - oprot.writeString(_iter1830); + oprot.writeString(_iter1838); } } } @@ -134328,9 +134328,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (java.lang.String _iter1831 : struct.group_names) + for (java.lang.String _iter1839 : struct.group_names) { - oprot.writeString(_iter1831); + oprot.writeString(_iter1839); } } } @@ -134350,13 +134350,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1832 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.part_vals = new java.util.ArrayList(_list1832.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1833; - for (int _i1834 = 0; _i1834 < _list1832.size; ++_i1834) + org.apache.thrift.protocol.TList _list1840 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.part_vals = new java.util.ArrayList(_list1840.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1841; + for (int _i1842 = 0; _i1842 < _list1840.size; ++_i1842) { - _elem1833 = iprot.readString(); - struct.part_vals.add(_elem1833); + _elem1841 = iprot.readString(); + struct.part_vals.add(_elem1841); } } struct.setPart_valsIsSet(true); @@ -134367,13 +134367,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1835 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.group_names = new java.util.ArrayList(_list1835.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1836; - for (int _i1837 = 0; _i1837 < _list1835.size; ++_i1837) + org.apache.thrift.protocol.TList _list1843 = 
iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.group_names = new java.util.ArrayList(_list1843.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1844; + for (int _i1845 = 0; _i1845 < _list1843.size; ++_i1845) { - _elem1836 = iprot.readString(); - struct.group_names.add(_elem1836); + _elem1844 = iprot.readString(); + struct.group_names.add(_elem1844); } } struct.setGroup_namesIsSet(true); @@ -137160,14 +137160,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1838 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1838.size); - @org.apache.thrift.annotation.Nullable Partition _elem1839; - for (int _i1840 = 0; _i1840 < _list1838.size; ++_i1840) + org.apache.thrift.protocol.TList _list1846 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1846.size); + @org.apache.thrift.annotation.Nullable Partition _elem1847; + for (int _i1848 = 0; _i1848 < _list1846.size; ++_i1848) { - _elem1839 = new Partition(); - _elem1839.read(iprot); - struct.success.add(_elem1839); + _elem1847 = new Partition(); + _elem1847.read(iprot); + struct.success.add(_elem1847); } iprot.readListEnd(); } @@ -137211,9 +137211,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1841 : struct.success) + for (Partition _iter1849 : struct.success) { - _iter1841.write(oprot); + _iter1849.write(oprot); } oprot.writeListEnd(); } @@ -137260,9 +137260,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1842 : struct.success) + for (Partition _iter1850 : struct.success) { - _iter1842.write(oprot); + _iter1850.write(oprot); } } } @@ -137280,14 +137280,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1843 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list1843.size); - @org.apache.thrift.annotation.Nullable Partition _elem1844; - for (int _i1845 = 0; _i1845 < _list1843.size; ++_i1845) + org.apache.thrift.protocol.TList _list1851 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list1851.size); + @org.apache.thrift.annotation.Nullable Partition _elem1852; + for (int _i1853 = 0; _i1853 < _list1851.size; ++_i1853) { - _elem1844 = new Partition(); - _elem1844.read(iprot); - struct.success.add(_elem1844); + _elem1852 = new Partition(); + _elem1852.read(iprot); + struct.success.add(_elem1852); } } struct.setSuccessIsSet(true); @@ -138925,13 +138925,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1846 = iprot.readListBegin(); - struct.group_names = new java.util.ArrayList(_list1846.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1847; - for (int _i1848 = 0; _i1848 < _list1846.size; ++_i1848) + 
org.apache.thrift.protocol.TList _list1854 = iprot.readListBegin(); + struct.group_names = new java.util.ArrayList(_list1854.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1855; + for (int _i1856 = 0; _i1856 < _list1854.size; ++_i1856) { - _elem1847 = iprot.readString(); - struct.group_names.add(_elem1847); + _elem1855 = iprot.readString(); + struct.group_names.add(_elem1855); } iprot.readListEnd(); } @@ -138975,9 +138975,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (java.lang.String _iter1849 : struct.group_names) + for (java.lang.String _iter1857 : struct.group_names) { - oprot.writeString(_iter1849); + oprot.writeString(_iter1857); } oprot.writeListEnd(); } @@ -139032,9 +139032,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (java.lang.String _iter1850 : struct.group_names) + for (java.lang.String _iter1858 : struct.group_names) { - oprot.writeString(_iter1850); + oprot.writeString(_iter1858); } } } @@ -139062,13 +139062,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list1851 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.group_names = new java.util.ArrayList(_list1851.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1852; - for (int _i1853 = 0; _i1853 < _list1851.size; ++_i1853) + org.apache.thrift.protocol.TList _list1859 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.group_names = new java.util.ArrayList(_list1859.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1860; + for (int _i1861 = 0; _i1861 < _list1859.size; ++_i1861) { - _elem1852 = iprot.readString(); - struct.group_names.add(_elem1852); + _elem1860 = iprot.readString(); + struct.group_names.add(_elem1860); } } struct.setGroup_namesIsSet(true); @@ -139560,14 +139560,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1854 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1854.size); - @org.apache.thrift.annotation.Nullable Partition _elem1855; - for (int _i1856 = 0; _i1856 < _list1854.size; ++_i1856) + org.apache.thrift.protocol.TList _list1862 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1862.size); + @org.apache.thrift.annotation.Nullable Partition _elem1863; + for (int _i1864 = 0; _i1864 < _list1862.size; ++_i1864) { - _elem1855 = new Partition(); - _elem1855.read(iprot); - struct.success.add(_elem1855); + _elem1863 = new Partition(); + _elem1863.read(iprot); + struct.success.add(_elem1863); } iprot.readListEnd(); } @@ -139611,9 +139611,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1857 : struct.success) + for (Partition _iter1865 : struct.success) { - _iter1857.write(oprot); + _iter1865.write(oprot); } oprot.writeListEnd(); } @@ 
-139660,9 +139660,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1858 : struct.success) + for (Partition _iter1866 : struct.success) { - _iter1858.write(oprot); + _iter1866.write(oprot); } } } @@ -139680,14 +139680,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1859 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list1859.size); - @org.apache.thrift.annotation.Nullable Partition _elem1860; - for (int _i1861 = 0; _i1861 < _list1859.size; ++_i1861) + org.apache.thrift.protocol.TList _list1867 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list1867.size); + @org.apache.thrift.annotation.Nullable Partition _elem1868; + for (int _i1869 = 0; _i1869 < _list1867.size; ++_i1869) { - _elem1860 = new Partition(); - _elem1860.read(iprot); - struct.success.add(_elem1860); + _elem1868 = new Partition(); + _elem1868.read(iprot); + struct.success.add(_elem1868); } } struct.setSuccessIsSet(true); @@ -140756,14 +140756,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1862 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1862.size); - @org.apache.thrift.annotation.Nullable PartitionSpec _elem1863; - for (int _i1864 = 0; _i1864 < _list1862.size; ++_i1864) + org.apache.thrift.protocol.TList _list1870 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1870.size); + @org.apache.thrift.annotation.Nullable PartitionSpec _elem1871; + for (int _i1872 = 0; _i1872 < _list1870.size; ++_i1872) { - _elem1863 = new PartitionSpec(); - _elem1863.read(iprot); - struct.success.add(_elem1863); + _elem1871 = new PartitionSpec(); + _elem1871.read(iprot); + struct.success.add(_elem1871); } iprot.readListEnd(); } @@ -140807,9 +140807,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1865 : struct.success) + for (PartitionSpec _iter1873 : struct.success) { - _iter1865.write(oprot); + _iter1873.write(oprot); } oprot.writeListEnd(); } @@ -140856,9 +140856,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1866 : struct.success) + for (PartitionSpec _iter1874 : struct.success) { - _iter1866.write(oprot); + _iter1874.write(oprot); } } } @@ -140876,14 +140876,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1867 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list1867.size); - @org.apache.thrift.annotation.Nullable PartitionSpec _elem1868; - for (int _i1869 = 0; _i1869 < _list1867.size; ++_i1869) + org.apache.thrift.protocol.TList _list1875 = 
iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list1875.size); + @org.apache.thrift.annotation.Nullable PartitionSpec _elem1876; + for (int _i1877 = 0; _i1877 < _list1875.size; ++_i1877) { - _elem1868 = new PartitionSpec(); - _elem1868.read(iprot); - struct.success.add(_elem1868); + _elem1876 = new PartitionSpec(); + _elem1876.read(iprot); + struct.success.add(_elem1876); } } struct.setSuccessIsSet(true); @@ -141949,13 +141949,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1870 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1870.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1871; - for (int _i1872 = 0; _i1872 < _list1870.size; ++_i1872) + org.apache.thrift.protocol.TList _list1878 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1878.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1879; + for (int _i1880 = 0; _i1880 < _list1878.size; ++_i1880) { - _elem1871 = iprot.readString(); - struct.success.add(_elem1871); + _elem1879 = iprot.readString(); + struct.success.add(_elem1879); } iprot.readListEnd(); } @@ -141999,9 +141999,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter1873 : struct.success) + for (java.lang.String _iter1881 : struct.success) { - oprot.writeString(_iter1873); + oprot.writeString(_iter1881); } oprot.writeListEnd(); } @@ -142048,9 +142048,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter1874 : struct.success) + for (java.lang.String _iter1882 : struct.success) { - oprot.writeString(_iter1874); + oprot.writeString(_iter1882); } } } @@ -142068,13 +142068,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1875 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list1875.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1876; - for (int _i1877 = 0; _i1877 < _list1875.size; ++_i1877) + org.apache.thrift.protocol.TList _list1883 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list1883.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1884; + for (int _i1885 = 0; _i1885 < _list1883.size; ++_i1885) { - _elem1876 = iprot.readString(); - struct.success.add(_elem1876); + _elem1884 = iprot.readString(); + struct.success.add(_elem1884); } } struct.setSuccessIsSet(true); @@ -143615,13 +143615,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1878 = iprot.readListBegin(); - struct.part_vals = new java.util.ArrayList(_list1878.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1879; - for (int _i1880 = 0; _i1880 < _list1878.size; 
++_i1880) + org.apache.thrift.protocol.TList _list1886 = iprot.readListBegin(); + struct.part_vals = new java.util.ArrayList(_list1886.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1887; + for (int _i1888 = 0; _i1888 < _list1886.size; ++_i1888) { - _elem1879 = iprot.readString(); - struct.part_vals.add(_elem1879); + _elem1887 = iprot.readString(); + struct.part_vals.add(_elem1887); } iprot.readListEnd(); } @@ -143665,9 +143665,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (java.lang.String _iter1881 : struct.part_vals) + for (java.lang.String _iter1889 : struct.part_vals) { - oprot.writeString(_iter1881); + oprot.writeString(_iter1889); } oprot.writeListEnd(); } @@ -143716,9 +143716,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (java.lang.String _iter1882 : struct.part_vals) + for (java.lang.String _iter1890 : struct.part_vals) { - oprot.writeString(_iter1882); + oprot.writeString(_iter1890); } } } @@ -143741,13 +143741,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1883 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.part_vals = new java.util.ArrayList(_list1883.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1884; - for (int _i1885 = 0; _i1885 < _list1883.size; ++_i1885) + org.apache.thrift.protocol.TList _list1891 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.part_vals = new java.util.ArrayList(_list1891.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1892; + for (int _i1893 = 0; _i1893 < _list1891.size; ++_i1893) { - _elem1884 = iprot.readString(); - struct.part_vals.add(_elem1884); + _elem1892 = iprot.readString(); + struct.part_vals.add(_elem1892); } } struct.setPart_valsIsSet(true); @@ -144243,14 +144243,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1886 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1886.size); - @org.apache.thrift.annotation.Nullable Partition _elem1887; - for (int _i1888 = 0; _i1888 < _list1886.size; ++_i1888) + org.apache.thrift.protocol.TList _list1894 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1894.size); + @org.apache.thrift.annotation.Nullable Partition _elem1895; + for (int _i1896 = 0; _i1896 < _list1894.size; ++_i1896) { - _elem1887 = new Partition(); - _elem1887.read(iprot); - struct.success.add(_elem1887); + _elem1895 = new Partition(); + _elem1895.read(iprot); + struct.success.add(_elem1895); } iprot.readListEnd(); } @@ -144294,9 +144294,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1889 : struct.success) + for (Partition _iter1897 : struct.success) { - _iter1889.write(oprot); + _iter1897.write(oprot); } oprot.writeListEnd(); } @@ -144343,9 +144343,9 @@ 
public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1890 : struct.success) + for (Partition _iter1898 : struct.success) { - _iter1890.write(oprot); + _iter1898.write(oprot); } } } @@ -144363,14 +144363,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1891 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list1891.size); - @org.apache.thrift.annotation.Nullable Partition _elem1892; - for (int _i1893 = 0; _i1893 < _list1891.size; ++_i1893) + org.apache.thrift.protocol.TList _list1899 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list1899.size); + @org.apache.thrift.annotation.Nullable Partition _elem1900; + for (int _i1901 = 0; _i1901 < _list1899.size; ++_i1901) { - _elem1892 = new Partition(); - _elem1892.read(iprot); - struct.success.add(_elem1892); + _elem1900 = new Partition(); + _elem1900.read(iprot); + struct.success.add(_elem1900); } } struct.setSuccessIsSet(true); @@ -145145,13 +145145,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1894 = iprot.readListBegin(); - struct.part_vals = new java.util.ArrayList(_list1894.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1895; - for (int _i1896 = 0; _i1896 < _list1894.size; ++_i1896) + org.apache.thrift.protocol.TList _list1902 = iprot.readListBegin(); + struct.part_vals = new java.util.ArrayList(_list1902.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1903; + for (int _i1904 = 0; _i1904 < _list1902.size; ++_i1904) { - _elem1895 = iprot.readString(); - struct.part_vals.add(_elem1895); + _elem1903 = iprot.readString(); + struct.part_vals.add(_elem1903); } iprot.readListEnd(); } @@ -145179,13 +145179,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1897 = iprot.readListBegin(); - struct.group_names = new java.util.ArrayList(_list1897.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1898; - for (int _i1899 = 0; _i1899 < _list1897.size; ++_i1899) + org.apache.thrift.protocol.TList _list1905 = iprot.readListBegin(); + struct.group_names = new java.util.ArrayList(_list1905.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1906; + for (int _i1907 = 0; _i1907 < _list1905.size; ++_i1907) { - _elem1898 = iprot.readString(); - struct.group_names.add(_elem1898); + _elem1906 = iprot.readString(); + struct.group_names.add(_elem1906); } iprot.readListEnd(); } @@ -145221,9 +145221,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (java.lang.String _iter1900 : struct.part_vals) + for (java.lang.String _iter1908 : struct.part_vals) { - oprot.writeString(_iter1900); + oprot.writeString(_iter1908); } oprot.writeListEnd(); } @@ -145241,9 +145241,9 @@ 
public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (java.lang.String _iter1901 : struct.group_names) + for (java.lang.String _iter1909 : struct.group_names) { - oprot.writeString(_iter1901); + oprot.writeString(_iter1909); } oprot.writeListEnd(); } @@ -145295,9 +145295,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (java.lang.String _iter1902 : struct.part_vals) + for (java.lang.String _iter1910 : struct.part_vals) { - oprot.writeString(_iter1902); + oprot.writeString(_iter1910); } } } @@ -145310,9 +145310,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (java.lang.String _iter1903 : struct.group_names) + for (java.lang.String _iter1911 : struct.group_names) { - oprot.writeString(_iter1903); + oprot.writeString(_iter1911); } } } @@ -145332,13 +145332,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1904 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.part_vals = new java.util.ArrayList(_list1904.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1905; - for (int _i1906 = 0; _i1906 < _list1904.size; ++_i1906) + org.apache.thrift.protocol.TList _list1912 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.part_vals = new java.util.ArrayList(_list1912.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1913; + for (int _i1914 = 0; _i1914 < _list1912.size; ++_i1914) { - _elem1905 = iprot.readString(); - struct.part_vals.add(_elem1905); + _elem1913 = iprot.readString(); + struct.part_vals.add(_elem1913); } } struct.setPart_valsIsSet(true); @@ -145353,13 +145353,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list1907 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.group_names = new java.util.ArrayList(_list1907.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1908; - for (int _i1909 = 0; _i1909 < _list1907.size; ++_i1909) + org.apache.thrift.protocol.TList _list1915 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.group_names = new java.util.ArrayList(_list1915.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1916; + for (int _i1917 = 0; _i1917 < _list1915.size; ++_i1917) { - _elem1908 = iprot.readString(); - struct.group_names.add(_elem1908); + _elem1916 = iprot.readString(); + struct.group_names.add(_elem1916); } } struct.setGroup_namesIsSet(true); @@ -145851,14 +145851,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1910 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1910.size); - @org.apache.thrift.annotation.Nullable Partition _elem1911; - for (int _i1912 = 0; _i1912 < _list1910.size; ++_i1912) + org.apache.thrift.protocol.TList _list1918 = iprot.readListBegin(); + struct.success = 
new java.util.ArrayList(_list1918.size); + @org.apache.thrift.annotation.Nullable Partition _elem1919; + for (int _i1920 = 0; _i1920 < _list1918.size; ++_i1920) { - _elem1911 = new Partition(); - _elem1911.read(iprot); - struct.success.add(_elem1911); + _elem1919 = new Partition(); + _elem1919.read(iprot); + struct.success.add(_elem1919); } iprot.readListEnd(); } @@ -145902,9 +145902,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1913 : struct.success) + for (Partition _iter1921 : struct.success) { - _iter1913.write(oprot); + _iter1921.write(oprot); } oprot.writeListEnd(); } @@ -145951,9 +145951,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1914 : struct.success) + for (Partition _iter1922 : struct.success) { - _iter1914.write(oprot); + _iter1922.write(oprot); } } } @@ -145971,14 +145971,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1915 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list1915.size); - @org.apache.thrift.annotation.Nullable Partition _elem1916; - for (int _i1917 = 0; _i1917 < _list1915.size; ++_i1917) + org.apache.thrift.protocol.TList _list1923 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list1923.size); + @org.apache.thrift.annotation.Nullable Partition _elem1924; + for (int _i1925 = 0; _i1925 < _list1923.size; ++_i1925) { - _elem1916 = new Partition(); - _elem1916.read(iprot); - struct.success.add(_elem1916); + _elem1924 = new Partition(); + _elem1924.read(iprot); + struct.success.add(_elem1924); } } struct.setSuccessIsSet(true); @@ -147519,13 +147519,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1918 = iprot.readListBegin(); - struct.part_vals = new java.util.ArrayList(_list1918.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1919; - for (int _i1920 = 0; _i1920 < _list1918.size; ++_i1920) + org.apache.thrift.protocol.TList _list1926 = iprot.readListBegin(); + struct.part_vals = new java.util.ArrayList(_list1926.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1927; + for (int _i1928 = 0; _i1928 < _list1926.size; ++_i1928) { - _elem1919 = iprot.readString(); - struct.part_vals.add(_elem1919); + _elem1927 = iprot.readString(); + struct.part_vals.add(_elem1927); } iprot.readListEnd(); } @@ -147569,9 +147569,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (java.lang.String _iter1921 : struct.part_vals) + for (java.lang.String _iter1929 : struct.part_vals) { - oprot.writeString(_iter1921); + oprot.writeString(_iter1929); } oprot.writeListEnd(); } @@ -147620,9 +147620,9 @@ public void write(org.apache.thrift.protocol.TProtocol 
prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (java.lang.String _iter1922 : struct.part_vals) + for (java.lang.String _iter1930 : struct.part_vals) { - oprot.writeString(_iter1922); + oprot.writeString(_iter1930); } } } @@ -147645,13 +147645,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1923 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.part_vals = new java.util.ArrayList(_list1923.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1924; - for (int _i1925 = 0; _i1925 < _list1923.size; ++_i1925) + org.apache.thrift.protocol.TList _list1931 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.part_vals = new java.util.ArrayList(_list1931.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1932; + for (int _i1933 = 0; _i1933 < _list1931.size; ++_i1933) { - _elem1924 = iprot.readString(); - struct.part_vals.add(_elem1924); + _elem1932 = iprot.readString(); + struct.part_vals.add(_elem1932); } } struct.setPart_valsIsSet(true); @@ -148144,13 +148144,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1926 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1926.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1927; - for (int _i1928 = 0; _i1928 < _list1926.size; ++_i1928) + org.apache.thrift.protocol.TList _list1934 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1934.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1935; + for (int _i1936 = 0; _i1936 < _list1934.size; ++_i1936) { - _elem1927 = iprot.readString(); - struct.success.add(_elem1927); + _elem1935 = iprot.readString(); + struct.success.add(_elem1935); } iprot.readListEnd(); } @@ -148194,9 +148194,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter1929 : struct.success) + for (java.lang.String _iter1937 : struct.success) { - oprot.writeString(_iter1929); + oprot.writeString(_iter1937); } oprot.writeListEnd(); } @@ -148243,9 +148243,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter1930 : struct.success) + for (java.lang.String _iter1938 : struct.success) { - oprot.writeString(_iter1930); + oprot.writeString(_iter1938); } } } @@ -148263,13 +148263,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1931 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list1931.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1932; - for (int _i1933 = 0; _i1933 < _list1931.size; ++_i1933) + org.apache.thrift.protocol.TList _list1939 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list1939.size); + 
@org.apache.thrift.annotation.Nullable java.lang.String _elem1940; + for (int _i1941 = 0; _i1941 < _list1939.size; ++_i1941) { - _elem1932 = iprot.readString(); - struct.success.add(_elem1932); + _elem1940 = iprot.readString(); + struct.success.add(_elem1940); } } struct.setSuccessIsSet(true); @@ -150081,13 +150081,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1934 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1934.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1935; - for (int _i1936 = 0; _i1936 < _list1934.size; ++_i1936) + org.apache.thrift.protocol.TList _list1942 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1942.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1943; + for (int _i1944 = 0; _i1944 < _list1942.size; ++_i1944) { - _elem1935 = iprot.readString(); - struct.success.add(_elem1935); + _elem1943 = iprot.readString(); + struct.success.add(_elem1943); } iprot.readListEnd(); } @@ -150131,9 +150131,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter1937 : struct.success) + for (java.lang.String _iter1945 : struct.success) { - oprot.writeString(_iter1937); + oprot.writeString(_iter1945); } oprot.writeListEnd(); } @@ -150180,9 +150180,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter1938 : struct.success) + for (java.lang.String _iter1946 : struct.success) { - oprot.writeString(_iter1938); + oprot.writeString(_iter1946); } } } @@ -150200,13 +150200,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1939 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list1939.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1940; - for (int _i1941 = 0; _i1941 < _list1939.size; ++_i1941) + org.apache.thrift.protocol.TList _list1947 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list1947.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1948; + for (int _i1949 = 0; _i1949 < _list1947.size; ++_i1949) { - _elem1940 = iprot.readString(); - struct.success.add(_elem1940); + _elem1948 = iprot.readString(); + struct.success.add(_elem1948); } } struct.setSuccessIsSet(true); @@ -151379,14 +151379,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1942 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1942.size); - @org.apache.thrift.annotation.Nullable Partition _elem1943; - for (int _i1944 = 0; _i1944 < _list1942.size; ++_i1944) + org.apache.thrift.protocol.TList _list1950 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1950.size); + @org.apache.thrift.annotation.Nullable 
Partition _elem1951; + for (int _i1952 = 0; _i1952 < _list1950.size; ++_i1952) { - _elem1943 = new Partition(); - _elem1943.read(iprot); - struct.success.add(_elem1943); + _elem1951 = new Partition(); + _elem1951.read(iprot); + struct.success.add(_elem1951); } iprot.readListEnd(); } @@ -151430,9 +151430,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1945 : struct.success) + for (Partition _iter1953 : struct.success) { - _iter1945.write(oprot); + _iter1953.write(oprot); } oprot.writeListEnd(); } @@ -151479,9 +151479,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1946 : struct.success) + for (Partition _iter1954 : struct.success) { - _iter1946.write(oprot); + _iter1954.write(oprot); } } } @@ -151499,14 +151499,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1947 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list1947.size); - @org.apache.thrift.annotation.Nullable Partition _elem1948; - for (int _i1949 = 0; _i1949 < _list1947.size; ++_i1949) + org.apache.thrift.protocol.TList _list1955 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list1955.size); + @org.apache.thrift.annotation.Nullable Partition _elem1956; + for (int _i1957 = 0; _i1957 < _list1955.size; ++_i1957) { - _elem1948 = new Partition(); - _elem1948.read(iprot); - struct.success.add(_elem1948); + _elem1956 = new Partition(); + _elem1956.read(iprot); + struct.success.add(_elem1956); } } struct.setSuccessIsSet(true); @@ -152375,14 +152375,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1950 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1950.size); - @org.apache.thrift.annotation.Nullable Partition _elem1951; - for (int _i1952 = 0; _i1952 < _list1950.size; ++_i1952) + org.apache.thrift.protocol.TList _list1958 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1958.size); + @org.apache.thrift.annotation.Nullable Partition _elem1959; + for (int _i1960 = 0; _i1960 < _list1958.size; ++_i1960) { - _elem1951 = new Partition(); - _elem1951.read(iprot); - struct.success.add(_elem1951); + _elem1959 = new Partition(); + _elem1959.read(iprot); + struct.success.add(_elem1959); } iprot.readListEnd(); } @@ -152426,9 +152426,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1953 : struct.success) + for (Partition _iter1961 : struct.success) { - _iter1953.write(oprot); + _iter1961.write(oprot); } oprot.writeListEnd(); } @@ -152475,9 +152475,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); 
- for (Partition _iter1954 : struct.success) + for (Partition _iter1962 : struct.success) { - _iter1954.write(oprot); + _iter1962.write(oprot); } } } @@ -152495,14 +152495,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1955 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list1955.size); - @org.apache.thrift.annotation.Nullable Partition _elem1956; - for (int _i1957 = 0; _i1957 < _list1955.size; ++_i1957) + org.apache.thrift.protocol.TList _list1963 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list1963.size); + @org.apache.thrift.annotation.Nullable Partition _elem1964; + for (int _i1965 = 0; _i1965 < _list1963.size; ++_i1965) { - _elem1956 = new Partition(); - _elem1956.read(iprot); - struct.success.add(_elem1956); + _elem1964 = new Partition(); + _elem1964.read(iprot); + struct.success.add(_elem1964); } } struct.setSuccessIsSet(true); @@ -153675,14 +153675,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1958 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1958.size); - @org.apache.thrift.annotation.Nullable PartitionSpec _elem1959; - for (int _i1960 = 0; _i1960 < _list1958.size; ++_i1960) + org.apache.thrift.protocol.TList _list1966 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1966.size); + @org.apache.thrift.annotation.Nullable PartitionSpec _elem1967; + for (int _i1968 = 0; _i1968 < _list1966.size; ++_i1968) { - _elem1959 = new PartitionSpec(); - _elem1959.read(iprot); - struct.success.add(_elem1959); + _elem1967 = new PartitionSpec(); + _elem1967.read(iprot); + struct.success.add(_elem1967); } iprot.readListEnd(); } @@ -153726,9 +153726,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter1961 : struct.success) + for (PartitionSpec _iter1969 : struct.success) { - _iter1961.write(oprot); + _iter1969.write(oprot); } oprot.writeListEnd(); } @@ -153775,9 +153775,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter1962 : struct.success) + for (PartitionSpec _iter1970 : struct.success) { - _iter1962.write(oprot); + _iter1970.write(oprot); } } } @@ -153795,14 +153795,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1963 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list1963.size); - @org.apache.thrift.annotation.Nullable PartitionSpec _elem1964; - for (int _i1965 = 0; _i1965 < _list1963.size; ++_i1965) + org.apache.thrift.protocol.TList _list1971 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list1971.size); + @org.apache.thrift.annotation.Nullable PartitionSpec _elem1972; + for (int 
_i1973 = 0; _i1973 < _list1971.size; ++_i1973) { - _elem1964 = new PartitionSpec(); - _elem1964.read(iprot); - struct.success.add(_elem1964); + _elem1972 = new PartitionSpec(); + _elem1972.read(iprot); + struct.success.add(_elem1972); } } struct.setSuccessIsSet(true); @@ -157350,13 +157350,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1966 = iprot.readListBegin(); - struct.names = new java.util.ArrayList(_list1966.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1967; - for (int _i1968 = 0; _i1968 < _list1966.size; ++_i1968) + org.apache.thrift.protocol.TList _list1974 = iprot.readListBegin(); + struct.names = new java.util.ArrayList(_list1974.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1975; + for (int _i1976 = 0; _i1976 < _list1974.size; ++_i1976) { - _elem1967 = iprot.readString(); - struct.names.add(_elem1967); + _elem1975 = iprot.readString(); + struct.names.add(_elem1975); } iprot.readListEnd(); } @@ -157392,9 +157392,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (java.lang.String _iter1969 : struct.names) + for (java.lang.String _iter1977 : struct.names) { - oprot.writeString(_iter1969); + oprot.writeString(_iter1977); } oprot.writeListEnd(); } @@ -157437,9 +157437,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (java.lang.String _iter1970 : struct.names) + for (java.lang.String _iter1978 : struct.names) { - oprot.writeString(_iter1970); + oprot.writeString(_iter1978); } } } @@ -157459,13 +157459,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1971 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.names = new java.util.ArrayList(_list1971.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1972; - for (int _i1973 = 0; _i1973 < _list1971.size; ++_i1973) + org.apache.thrift.protocol.TList _list1979 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.names = new java.util.ArrayList(_list1979.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem1980; + for (int _i1981 = 0; _i1981 < _list1979.size; ++_i1981) { - _elem1972 = iprot.readString(); - struct.names.add(_elem1972); + _elem1980 = iprot.readString(); + struct.names.add(_elem1980); } } struct.setNamesIsSet(true); @@ -158038,14 +158038,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1974 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list1974.size); - @org.apache.thrift.annotation.Nullable Partition _elem1975; - for (int _i1976 = 0; _i1976 < _list1974.size; ++_i1976) + org.apache.thrift.protocol.TList _list1982 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list1982.size); + @org.apache.thrift.annotation.Nullable Partition _elem1983; + for (int _i1984 = 0; _i1984 < _list1982.size; ++_i1984) { - _elem1975 = new Partition(); 
- _elem1975.read(iprot); - struct.success.add(_elem1975); + _elem1983 = new Partition(); + _elem1983.read(iprot); + struct.success.add(_elem1983); } iprot.readListEnd(); } @@ -158098,9 +158098,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter1977 : struct.success) + for (Partition _iter1985 : struct.success) { - _iter1977.write(oprot); + _iter1985.write(oprot); } oprot.writeListEnd(); } @@ -158155,9 +158155,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter1978 : struct.success) + for (Partition _iter1986 : struct.success) { - _iter1978.write(oprot); + _iter1986.write(oprot); } } } @@ -158178,14 +158178,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na java.util.BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1979 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list1979.size); - @org.apache.thrift.annotation.Nullable Partition _elem1980; - for (int _i1981 = 0; _i1981 < _list1979.size; ++_i1981) + org.apache.thrift.protocol.TList _list1987 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list1987.size); + @org.apache.thrift.annotation.Nullable Partition _elem1988; + for (int _i1989 = 0; _i1989 < _list1987.size; ++_i1989) { - _elem1980 = new Partition(); - _elem1980.read(iprot); - struct.success.add(_elem1980); + _elem1988 = new Partition(); + _elem1988.read(iprot); + struct.success.add(_elem1988); } } struct.setSuccessIsSet(true); @@ -162689,14 +162689,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1982 = iprot.readListBegin(); - struct.new_parts = new java.util.ArrayList(_list1982.size); - @org.apache.thrift.annotation.Nullable Partition _elem1983; - for (int _i1984 = 0; _i1984 < _list1982.size; ++_i1984) + org.apache.thrift.protocol.TList _list1990 = iprot.readListBegin(); + struct.new_parts = new java.util.ArrayList(_list1990.size); + @org.apache.thrift.annotation.Nullable Partition _elem1991; + for (int _i1992 = 0; _i1992 < _list1990.size; ++_i1992) { - _elem1983 = new Partition(); - _elem1983.read(iprot); - struct.new_parts.add(_elem1983); + _elem1991 = new Partition(); + _elem1991.read(iprot); + struct.new_parts.add(_elem1991); } iprot.readListEnd(); } @@ -162732,9 +162732,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1985 : struct.new_parts) + for (Partition _iter1993 : struct.new_parts) { - _iter1985.write(oprot); + _iter1993.write(oprot); } oprot.writeListEnd(); } @@ -162777,9 +162777,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1986 : struct.new_parts) + for (Partition _iter1994 : 
struct.new_parts) { - _iter1986.write(oprot); + _iter1994.write(oprot); } } } @@ -162799,14 +162799,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1987 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.new_parts = new java.util.ArrayList(_list1987.size); - @org.apache.thrift.annotation.Nullable Partition _elem1988; - for (int _i1989 = 0; _i1989 < _list1987.size; ++_i1989) + org.apache.thrift.protocol.TList _list1995 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.new_parts = new java.util.ArrayList(_list1995.size); + @org.apache.thrift.annotation.Nullable Partition _elem1996; + for (int _i1997 = 0; _i1997 < _list1995.size; ++_i1997) { - _elem1988 = new Partition(); - _elem1988.read(iprot); - struct.new_parts.add(_elem1988); + _elem1996 = new Partition(); + _elem1996.read(iprot); + struct.new_parts.add(_elem1996); } } struct.setNew_partsIsSet(true); @@ -163868,14 +163868,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1990 = iprot.readListBegin(); - struct.new_parts = new java.util.ArrayList(_list1990.size); - @org.apache.thrift.annotation.Nullable Partition _elem1991; - for (int _i1992 = 0; _i1992 < _list1990.size; ++_i1992) + org.apache.thrift.protocol.TList _list1998 = iprot.readListBegin(); + struct.new_parts = new java.util.ArrayList(_list1998.size); + @org.apache.thrift.annotation.Nullable Partition _elem1999; + for (int _i2000 = 0; _i2000 < _list1998.size; ++_i2000) { - _elem1991 = new Partition(); - _elem1991.read(iprot); - struct.new_parts.add(_elem1991); + _elem1999 = new Partition(); + _elem1999.read(iprot); + struct.new_parts.add(_elem1999); } iprot.readListEnd(); } @@ -163920,9 +163920,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter1993 : struct.new_parts) + for (Partition _iter2001 : struct.new_parts) { - _iter1993.write(oprot); + _iter2001.write(oprot); } oprot.writeListEnd(); } @@ -163973,9 +163973,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter1994 : struct.new_parts) + for (Partition _iter2002 : struct.new_parts) { - _iter1994.write(oprot); + _iter2002.write(oprot); } } } @@ -163998,14 +163998,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1995 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.new_parts = new java.util.ArrayList(_list1995.size); - @org.apache.thrift.annotation.Nullable Partition _elem1996; - for (int _i1997 = 0; _i1997 < _list1995.size; ++_i1997) + org.apache.thrift.protocol.TList _list2003 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.new_parts = new java.util.ArrayList(_list2003.size); + @org.apache.thrift.annotation.Nullable Partition _elem2004; + for (int _i2005 = 0; _i2005 < _list2003.size; ++_i2005) { - _elem1996 = new Partition(); - _elem1996.read(iprot); - struct.new_parts.add(_elem1996); + _elem2004 = new 
Partition(); + _elem2004.read(iprot); + struct.new_parts.add(_elem2004); } } struct.setNew_partsIsSet(true); @@ -167169,13 +167169,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1998 = iprot.readListBegin(); - struct.part_vals = new java.util.ArrayList(_list1998.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem1999; - for (int _i2000 = 0; _i2000 < _list1998.size; ++_i2000) + org.apache.thrift.protocol.TList _list2006 = iprot.readListBegin(); + struct.part_vals = new java.util.ArrayList(_list2006.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2007; + for (int _i2008 = 0; _i2008 < _list2006.size; ++_i2008) { - _elem1999 = iprot.readString(); - struct.part_vals.add(_elem1999); + _elem2007 = iprot.readString(); + struct.part_vals.add(_elem2007); } iprot.readListEnd(); } @@ -167220,9 +167220,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (java.lang.String _iter2001 : struct.part_vals) + for (java.lang.String _iter2009 : struct.part_vals) { - oprot.writeString(_iter2001); + oprot.writeString(_iter2009); } oprot.writeListEnd(); } @@ -167273,9 +167273,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (java.lang.String _iter2002 : struct.part_vals) + for (java.lang.String _iter2010 : struct.part_vals) { - oprot.writeString(_iter2002); + oprot.writeString(_iter2010); } } } @@ -167298,13 +167298,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list2003 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.part_vals = new java.util.ArrayList(_list2003.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2004; - for (int _i2005 = 0; _i2005 < _list2003.size; ++_i2005) + org.apache.thrift.protocol.TList _list2011 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.part_vals = new java.util.ArrayList(_list2011.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2012; + for (int _i2013 = 0; _i2013 < _list2011.size; ++_i2013) { - _elem2004 = iprot.readString(); - struct.part_vals.add(_elem2004); + _elem2012 = iprot.readString(); + struct.part_vals.add(_elem2012); } } struct.setPart_valsIsSet(true); @@ -169130,13 +169130,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2006 = iprot.readListBegin(); - struct.part_vals = new java.util.ArrayList(_list2006.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2007; - for (int _i2008 = 0; _i2008 < _list2006.size; ++_i2008) + org.apache.thrift.protocol.TList _list2014 = iprot.readListBegin(); + struct.part_vals = new java.util.ArrayList(_list2014.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2015; + for (int _i2016 = 0; _i2016 < _list2014.size; ++_i2016) { - _elem2007 = iprot.readString(); - struct.part_vals.add(_elem2007); + _elem2015 = 
iprot.readString(); + struct.part_vals.add(_elem2015); } iprot.readListEnd(); } @@ -169170,9 +169170,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (java.lang.String _iter2009 : struct.part_vals) + for (java.lang.String _iter2017 : struct.part_vals) { - oprot.writeString(_iter2009); + oprot.writeString(_iter2017); } oprot.writeListEnd(); } @@ -169209,9 +169209,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (java.lang.String _iter2010 : struct.part_vals) + for (java.lang.String _iter2018 : struct.part_vals) { - oprot.writeString(_iter2010); + oprot.writeString(_iter2018); } } } @@ -169226,13 +169226,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list2011 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.part_vals = new java.util.ArrayList(_list2011.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2012; - for (int _i2013 = 0; _i2013 < _list2011.size; ++_i2013) + org.apache.thrift.protocol.TList _list2019 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.part_vals = new java.util.ArrayList(_list2019.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2020; + for (int _i2021 = 0; _i2021 < _list2019.size; ++_i2021) { - _elem2012 = iprot.readString(); - struct.part_vals.add(_elem2012); + _elem2020 = iprot.readString(); + struct.part_vals.add(_elem2020); } } struct.setPart_valsIsSet(true); @@ -171405,13 +171405,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2014 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list2014.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2015; - for (int _i2016 = 0; _i2016 < _list2014.size; ++_i2016) + org.apache.thrift.protocol.TList _list2022 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list2022.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2023; + for (int _i2024 = 0; _i2024 < _list2022.size; ++_i2024) { - _elem2015 = iprot.readString(); - struct.success.add(_elem2015); + _elem2023 = iprot.readString(); + struct.success.add(_elem2023); } iprot.readListEnd(); } @@ -171446,9 +171446,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter2017 : struct.success) + for (java.lang.String _iter2025 : struct.success) { - oprot.writeString(_iter2017); + oprot.writeString(_iter2025); } oprot.writeListEnd(); } @@ -171487,9 +171487,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter2018 : struct.success) + for (java.lang.String _iter2026 : struct.success) { - oprot.writeString(_iter2018); + 
oprot.writeString(_iter2026); } } } @@ -171504,13 +171504,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list2019 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list2019.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2020; - for (int _i2021 = 0; _i2021 < _list2019.size; ++_i2021) + org.apache.thrift.protocol.TList _list2027 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list2027.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2028; + for (int _i2029 = 0; _i2029 < _list2027.size; ++_i2029) { - _elem2020 = iprot.readString(); - struct.success.add(_elem2020); + _elem2028 = iprot.readString(); + struct.success.add(_elem2028); } } struct.setSuccessIsSet(true); @@ -172281,15 +172281,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map2022 = iprot.readMapBegin(); - struct.success = new java.util.HashMap(2*_map2022.size); - @org.apache.thrift.annotation.Nullable java.lang.String _key2023; - @org.apache.thrift.annotation.Nullable java.lang.String _val2024; - for (int _i2025 = 0; _i2025 < _map2022.size; ++_i2025) + org.apache.thrift.protocol.TMap _map2030 = iprot.readMapBegin(); + struct.success = new java.util.HashMap(2*_map2030.size); + @org.apache.thrift.annotation.Nullable java.lang.String _key2031; + @org.apache.thrift.annotation.Nullable java.lang.String _val2032; + for (int _i2033 = 0; _i2033 < _map2030.size; ++_i2033) { - _key2023 = iprot.readString(); - _val2024 = iprot.readString(); - struct.success.put(_key2023, _val2024); + _key2031 = iprot.readString(); + _val2032 = iprot.readString(); + struct.success.put(_key2031, _val2032); } iprot.readMapEnd(); } @@ -172324,10 +172324,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.util.Map.Entry _iter2026 : struct.success.entrySet()) + for (java.util.Map.Entry _iter2034 : struct.success.entrySet()) { - oprot.writeString(_iter2026.getKey()); - oprot.writeString(_iter2026.getValue()); + oprot.writeString(_iter2034.getKey()); + oprot.writeString(_iter2034.getValue()); } oprot.writeMapEnd(); } @@ -172366,10 +172366,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.util.Map.Entry _iter2027 : struct.success.entrySet()) + for (java.util.Map.Entry _iter2035 : struct.success.entrySet()) { - oprot.writeString(_iter2027.getKey()); - oprot.writeString(_iter2027.getValue()); + oprot.writeString(_iter2035.getKey()); + oprot.writeString(_iter2035.getValue()); } } } @@ -172384,15 +172384,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map2028 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.success = new 
java.util.HashMap(2*_map2028.size); - @org.apache.thrift.annotation.Nullable java.lang.String _key2029; - @org.apache.thrift.annotation.Nullable java.lang.String _val2030; - for (int _i2031 = 0; _i2031 < _map2028.size; ++_i2031) + org.apache.thrift.protocol.TMap _map2036 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.HashMap(2*_map2036.size); + @org.apache.thrift.annotation.Nullable java.lang.String _key2037; + @org.apache.thrift.annotation.Nullable java.lang.String _val2038; + for (int _i2039 = 0; _i2039 < _map2036.size; ++_i2039) { - _key2029 = iprot.readString(); - _val2030 = iprot.readString(); - struct.success.put(_key2029, _val2030); + _key2037 = iprot.readString(); + _val2038 = iprot.readString(); + struct.success.put(_key2037, _val2038); } } struct.setSuccessIsSet(true); @@ -172991,15 +172991,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map2032 = iprot.readMapBegin(); - struct.part_vals = new java.util.HashMap(2*_map2032.size); - @org.apache.thrift.annotation.Nullable java.lang.String _key2033; - @org.apache.thrift.annotation.Nullable java.lang.String _val2034; - for (int _i2035 = 0; _i2035 < _map2032.size; ++_i2035) + org.apache.thrift.protocol.TMap _map2040 = iprot.readMapBegin(); + struct.part_vals = new java.util.HashMap(2*_map2040.size); + @org.apache.thrift.annotation.Nullable java.lang.String _key2041; + @org.apache.thrift.annotation.Nullable java.lang.String _val2042; + for (int _i2043 = 0; _i2043 < _map2040.size; ++_i2043) { - _key2033 = iprot.readString(); - _val2034 = iprot.readString(); - struct.part_vals.put(_key2033, _val2034); + _key2041 = iprot.readString(); + _val2042 = iprot.readString(); + struct.part_vals.put(_key2041, _val2042); } iprot.readMapEnd(); } @@ -173043,10 +173043,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (java.util.Map.Entry _iter2036 : struct.part_vals.entrySet()) + for (java.util.Map.Entry _iter2044 : struct.part_vals.entrySet()) { - oprot.writeString(_iter2036.getKey()); - oprot.writeString(_iter2036.getValue()); + oprot.writeString(_iter2044.getKey()); + oprot.writeString(_iter2044.getValue()); } oprot.writeMapEnd(); } @@ -173097,10 +173097,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (java.util.Map.Entry _iter2037 : struct.part_vals.entrySet()) + for (java.util.Map.Entry _iter2045 : struct.part_vals.entrySet()) { - oprot.writeString(_iter2037.getKey()); - oprot.writeString(_iter2037.getValue()); + oprot.writeString(_iter2045.getKey()); + oprot.writeString(_iter2045.getValue()); } } } @@ -173123,15 +173123,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map2038 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.part_vals = new java.util.HashMap(2*_map2038.size); - @org.apache.thrift.annotation.Nullable java.lang.String _key2039; - 
@org.apache.thrift.annotation.Nullable java.lang.String _val2040; - for (int _i2041 = 0; _i2041 < _map2038.size; ++_i2041) + org.apache.thrift.protocol.TMap _map2046 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.part_vals = new java.util.HashMap(2*_map2046.size); + @org.apache.thrift.annotation.Nullable java.lang.String _key2047; + @org.apache.thrift.annotation.Nullable java.lang.String _val2048; + for (int _i2049 = 0; _i2049 < _map2046.size; ++_i2049) { - _key2039 = iprot.readString(); - _val2040 = iprot.readString(); - struct.part_vals.put(_key2039, _val2040); + _key2047 = iprot.readString(); + _val2048 = iprot.readString(); + struct.part_vals.put(_key2047, _val2048); } } struct.setPart_valsIsSet(true); @@ -174623,15 +174623,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map2042 = iprot.readMapBegin(); - struct.part_vals = new java.util.HashMap(2*_map2042.size); - @org.apache.thrift.annotation.Nullable java.lang.String _key2043; - @org.apache.thrift.annotation.Nullable java.lang.String _val2044; - for (int _i2045 = 0; _i2045 < _map2042.size; ++_i2045) + org.apache.thrift.protocol.TMap _map2050 = iprot.readMapBegin(); + struct.part_vals = new java.util.HashMap(2*_map2050.size); + @org.apache.thrift.annotation.Nullable java.lang.String _key2051; + @org.apache.thrift.annotation.Nullable java.lang.String _val2052; + for (int _i2053 = 0; _i2053 < _map2050.size; ++_i2053) { - _key2043 = iprot.readString(); - _val2044 = iprot.readString(); - struct.part_vals.put(_key2043, _val2044); + _key2051 = iprot.readString(); + _val2052 = iprot.readString(); + struct.part_vals.put(_key2051, _val2052); } iprot.readMapEnd(); } @@ -174675,10 +174675,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (java.util.Map.Entry _iter2046 : struct.part_vals.entrySet()) + for (java.util.Map.Entry _iter2054 : struct.part_vals.entrySet()) { - oprot.writeString(_iter2046.getKey()); - oprot.writeString(_iter2046.getValue()); + oprot.writeString(_iter2054.getKey()); + oprot.writeString(_iter2054.getValue()); } oprot.writeMapEnd(); } @@ -174729,10 +174729,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (java.util.Map.Entry _iter2047 : struct.part_vals.entrySet()) + for (java.util.Map.Entry _iter2055 : struct.part_vals.entrySet()) { - oprot.writeString(_iter2047.getKey()); - oprot.writeString(_iter2047.getValue()); + oprot.writeString(_iter2055.getKey()); + oprot.writeString(_iter2055.getValue()); } } } @@ -174755,15 +174755,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map2048 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); - struct.part_vals = new java.util.HashMap(2*_map2048.size); - @org.apache.thrift.annotation.Nullable java.lang.String _key2049; - @org.apache.thrift.annotation.Nullable java.lang.String _val2050; - for (int _i2051 = 0; _i2051 < _map2048.size; ++_i2051) + 
org.apache.thrift.protocol.TMap _map2056 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING); + struct.part_vals = new java.util.HashMap(2*_map2056.size); + @org.apache.thrift.annotation.Nullable java.lang.String _key2057; + @org.apache.thrift.annotation.Nullable java.lang.String _val2058; + for (int _i2059 = 0; _i2059 < _map2056.size; ++_i2059) { - _key2049 = iprot.readString(); - _val2050 = iprot.readString(); - struct.part_vals.put(_key2049, _val2050); + _key2057 = iprot.readString(); + _val2058 = iprot.readString(); + struct.part_vals.put(_key2057, _val2058); } } struct.setPart_valsIsSet(true); @@ -201467,13 +201467,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2052 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list2052.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2053; - for (int _i2054 = 0; _i2054 < _list2052.size; ++_i2054) + org.apache.thrift.protocol.TList _list2060 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list2060.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2061; + for (int _i2062 = 0; _i2062 < _list2060.size; ++_i2062) { - _elem2053 = iprot.readString(); - struct.success.add(_elem2053); + _elem2061 = iprot.readString(); + struct.success.add(_elem2061); } iprot.readListEnd(); } @@ -201508,9 +201508,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter2055 : struct.success) + for (java.lang.String _iter2063 : struct.success) { - oprot.writeString(_iter2055); + oprot.writeString(_iter2063); } oprot.writeListEnd(); } @@ -201549,9 +201549,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter2056 : struct.success) + for (java.lang.String _iter2064 : struct.success) { - oprot.writeString(_iter2056); + oprot.writeString(_iter2064); } } } @@ -201566,13 +201566,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list2057 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list2057.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2058; - for (int _i2059 = 0; _i2059 < _list2057.size; ++_i2059) + org.apache.thrift.protocol.TList _list2065 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list2065.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2066; + for (int _i2067 = 0; _i2067 < _list2065.size; ++_i2067) { - _elem2058 = iprot.readString(); - struct.success.add(_elem2058); + _elem2066 = iprot.readString(); + struct.success.add(_elem2066); } } struct.setSuccessIsSet(true); @@ -205662,13 +205662,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2060 = 
iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list2060.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2061; - for (int _i2062 = 0; _i2062 < _list2060.size; ++_i2062) + org.apache.thrift.protocol.TList _list2068 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list2068.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2069; + for (int _i2070 = 0; _i2070 < _list2068.size; ++_i2070) { - _elem2061 = iprot.readString(); - struct.success.add(_elem2061); + _elem2069 = iprot.readString(); + struct.success.add(_elem2069); } iprot.readListEnd(); } @@ -205703,9 +205703,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter2063 : struct.success) + for (java.lang.String _iter2071 : struct.success) { - oprot.writeString(_iter2063); + oprot.writeString(_iter2071); } oprot.writeListEnd(); } @@ -205744,9 +205744,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter2064 : struct.success) + for (java.lang.String _iter2072 : struct.success) { - oprot.writeString(_iter2064); + oprot.writeString(_iter2072); } } } @@ -205761,13 +205761,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list2065 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list2065.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2066; - for (int _i2067 = 0; _i2067 < _list2065.size; ++_i2067) + org.apache.thrift.protocol.TList _list2073 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list2073.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2074; + for (int _i2075 = 0; _i2075 < _list2073.size; ++_i2075) { - _elem2066 = iprot.readString(); - struct.success.add(_elem2066); + _elem2074 = iprot.readString(); + struct.success.add(_elem2074); } } struct.setSuccessIsSet(true); @@ -209074,14 +209074,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2068 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list2068.size); - @org.apache.thrift.annotation.Nullable Role _elem2069; - for (int _i2070 = 0; _i2070 < _list2068.size; ++_i2070) + org.apache.thrift.protocol.TList _list2076 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list2076.size); + @org.apache.thrift.annotation.Nullable Role _elem2077; + for (int _i2078 = 0; _i2078 < _list2076.size; ++_i2078) { - _elem2069 = new Role(); - _elem2069.read(iprot); - struct.success.add(_elem2069); + _elem2077 = new Role(); + _elem2077.read(iprot); + struct.success.add(_elem2077); } iprot.readListEnd(); } @@ -209116,9 +209116,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - 
for (Role _iter2071 : struct.success) + for (Role _iter2079 : struct.success) { - _iter2071.write(oprot); + _iter2079.write(oprot); } oprot.writeListEnd(); } @@ -209157,9 +209157,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter2072 : struct.success) + for (Role _iter2080 : struct.success) { - _iter2072.write(oprot); + _iter2080.write(oprot); } } } @@ -209174,14 +209174,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list2073 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list2073.size); - @org.apache.thrift.annotation.Nullable Role _elem2074; - for (int _i2075 = 0; _i2075 < _list2073.size; ++_i2075) + org.apache.thrift.protocol.TList _list2081 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list2081.size); + @org.apache.thrift.annotation.Nullable Role _elem2082; + for (int _i2083 = 0; _i2083 < _list2081.size; ++_i2083) { - _elem2074 = new Role(); - _elem2074.read(iprot); - struct.success.add(_elem2074); + _elem2082 = new Role(); + _elem2082.read(iprot); + struct.success.add(_elem2082); } } struct.setSuccessIsSet(true); @@ -212215,13 +212215,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2076 = iprot.readListBegin(); - struct.group_names = new java.util.ArrayList(_list2076.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2077; - for (int _i2078 = 0; _i2078 < _list2076.size; ++_i2078) + org.apache.thrift.protocol.TList _list2084 = iprot.readListBegin(); + struct.group_names = new java.util.ArrayList(_list2084.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2085; + for (int _i2086 = 0; _i2086 < _list2084.size; ++_i2086) { - _elem2077 = iprot.readString(); - struct.group_names.add(_elem2077); + _elem2085 = iprot.readString(); + struct.group_names.add(_elem2085); } iprot.readListEnd(); } @@ -212257,9 +212257,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (java.lang.String _iter2079 : struct.group_names) + for (java.lang.String _iter2087 : struct.group_names) { - oprot.writeString(_iter2079); + oprot.writeString(_iter2087); } oprot.writeListEnd(); } @@ -212302,9 +212302,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (java.lang.String _iter2080 : struct.group_names) + for (java.lang.String _iter2088 : struct.group_names) { - oprot.writeString(_iter2080); + oprot.writeString(_iter2088); } } } @@ -212325,13 +212325,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list2081 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.group_names = new java.util.ArrayList(_list2081.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2082; 
- for (int _i2083 = 0; _i2083 < _list2081.size; ++_i2083) + org.apache.thrift.protocol.TList _list2089 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.group_names = new java.util.ArrayList(_list2089.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2090; + for (int _i2091 = 0; _i2091 < _list2089.size; ++_i2091) { - _elem2082 = iprot.readString(); - struct.group_names.add(_elem2082); + _elem2090 = iprot.readString(); + struct.group_names.add(_elem2090); } } struct.setGroup_namesIsSet(true); @@ -213802,14 +213802,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2084 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list2084.size); - @org.apache.thrift.annotation.Nullable HiveObjectPrivilege _elem2085; - for (int _i2086 = 0; _i2086 < _list2084.size; ++_i2086) + org.apache.thrift.protocol.TList _list2092 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list2092.size); + @org.apache.thrift.annotation.Nullable HiveObjectPrivilege _elem2093; + for (int _i2094 = 0; _i2094 < _list2092.size; ++_i2094) { - _elem2085 = new HiveObjectPrivilege(); - _elem2085.read(iprot); - struct.success.add(_elem2085); + _elem2093 = new HiveObjectPrivilege(); + _elem2093.read(iprot); + struct.success.add(_elem2093); } iprot.readListEnd(); } @@ -213844,9 +213844,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter2087 : struct.success) + for (HiveObjectPrivilege _iter2095 : struct.success) { - _iter2087.write(oprot); + _iter2095.write(oprot); } oprot.writeListEnd(); } @@ -213885,9 +213885,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter2088 : struct.success) + for (HiveObjectPrivilege _iter2096 : struct.success) { - _iter2088.write(oprot); + _iter2096.write(oprot); } } } @@ -213902,14 +213902,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list2089 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list2089.size); - @org.apache.thrift.annotation.Nullable HiveObjectPrivilege _elem2090; - for (int _i2091 = 0; _i2091 < _list2089.size; ++_i2091) + org.apache.thrift.protocol.TList _list2097 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list2097.size); + @org.apache.thrift.annotation.Nullable HiveObjectPrivilege _elem2098; + for (int _i2099 = 0; _i2099 < _list2097.size; ++_i2099) { - _elem2090 = new HiveObjectPrivilege(); - _elem2090.read(iprot); - struct.success.add(_elem2090); + _elem2098 = new HiveObjectPrivilege(); + _elem2098.read(iprot); + struct.success.add(_elem2098); } } struct.setSuccessIsSet(true); @@ -217887,13 +217887,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2092 = 
iprot.readListBegin(); - struct.group_names = new java.util.ArrayList(_list2092.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2093; - for (int _i2094 = 0; _i2094 < _list2092.size; ++_i2094) + org.apache.thrift.protocol.TList _list2100 = iprot.readListBegin(); + struct.group_names = new java.util.ArrayList(_list2100.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2101; + for (int _i2102 = 0; _i2102 < _list2100.size; ++_i2102) { - _elem2093 = iprot.readString(); - struct.group_names.add(_elem2093); + _elem2101 = iprot.readString(); + struct.group_names.add(_elem2101); } iprot.readListEnd(); } @@ -217924,9 +217924,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (java.lang.String _iter2095 : struct.group_names) + for (java.lang.String _iter2103 : struct.group_names) { - oprot.writeString(_iter2095); + oprot.writeString(_iter2103); } oprot.writeListEnd(); } @@ -217963,9 +217963,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (java.lang.String _iter2096 : struct.group_names) + for (java.lang.String _iter2104 : struct.group_names) { - oprot.writeString(_iter2096); + oprot.writeString(_iter2104); } } } @@ -217981,13 +217981,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list2097 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.group_names = new java.util.ArrayList(_list2097.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2098; - for (int _i2099 = 0; _i2099 < _list2097.size; ++_i2099) + org.apache.thrift.protocol.TList _list2105 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.group_names = new java.util.ArrayList(_list2105.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2106; + for (int _i2107 = 0; _i2107 < _list2105.size; ++_i2107) { - _elem2098 = iprot.readString(); - struct.group_names.add(_elem2098); + _elem2106 = iprot.readString(); + struct.group_names.add(_elem2106); } } struct.setGroup_namesIsSet(true); @@ -218395,13 +218395,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2100 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list2100.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2101; - for (int _i2102 = 0; _i2102 < _list2100.size; ++_i2102) + org.apache.thrift.protocol.TList _list2108 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list2108.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2109; + for (int _i2110 = 0; _i2110 < _list2108.size; ++_i2110) { - _elem2101 = iprot.readString(); - struct.success.add(_elem2101); + _elem2109 = iprot.readString(); + struct.success.add(_elem2109); } iprot.readListEnd(); } @@ -218436,9 +218436,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.success.size())); - for (java.lang.String _iter2103 : struct.success) + for (java.lang.String _iter2111 : struct.success) { - oprot.writeString(_iter2103); + oprot.writeString(_iter2111); } oprot.writeListEnd(); } @@ -218477,9 +218477,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter2104 : struct.success) + for (java.lang.String _iter2112 : struct.success) { - oprot.writeString(_iter2104); + oprot.writeString(_iter2112); } } } @@ -218494,13 +218494,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list2105 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list2105.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2106; - for (int _i2107 = 0; _i2107 < _list2105.size; ++_i2107) + org.apache.thrift.protocol.TList _list2113 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list2113.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2114; + for (int _i2115 = 0; _i2115 < _list2113.size; ++_i2115) { - _elem2106 = iprot.readString(); - struct.success.add(_elem2106); + _elem2114 = iprot.readString(); + struct.success.add(_elem2114); } } struct.setSuccessIsSet(true); @@ -223839,13 +223839,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2108 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list2108.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2109; - for (int _i2110 = 0; _i2110 < _list2108.size; ++_i2110) + org.apache.thrift.protocol.TList _list2116 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list2116.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2117; + for (int _i2118 = 0; _i2118 < _list2116.size; ++_i2118) { - _elem2109 = iprot.readString(); - struct.success.add(_elem2109); + _elem2117 = iprot.readString(); + struct.success.add(_elem2117); } iprot.readListEnd(); } @@ -223871,9 +223871,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter2111 : struct.success) + for (java.lang.String _iter2119 : struct.success) { - oprot.writeString(_iter2111); + oprot.writeString(_iter2119); } oprot.writeListEnd(); } @@ -223904,9 +223904,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter2112 : struct.success) + for (java.lang.String _iter2120 : struct.success) { - oprot.writeString(_iter2112); + oprot.writeString(_iter2120); } } } @@ -223918,13 +223918,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list2113 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new 
java.util.ArrayList(_list2113.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2114; - for (int _i2115 = 0; _i2115 < _list2113.size; ++_i2115) + org.apache.thrift.protocol.TList _list2121 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list2121.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2122; + for (int _i2123 = 0; _i2123 < _list2121.size; ++_i2123) { - _elem2114 = iprot.readString(); - struct.success.add(_elem2114); + _elem2122 = iprot.readString(); + struct.success.add(_elem2122); } } struct.setSuccessIsSet(true); @@ -226975,13 +226975,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2116 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list2116.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2117; - for (int _i2118 = 0; _i2118 < _list2116.size; ++_i2118) + org.apache.thrift.protocol.TList _list2124 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list2124.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2125; + for (int _i2126 = 0; _i2126 < _list2124.size; ++_i2126) { - _elem2117 = iprot.readString(); - struct.success.add(_elem2117); + _elem2125 = iprot.readString(); + struct.success.add(_elem2125); } iprot.readListEnd(); } @@ -227007,9 +227007,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter2119 : struct.success) + for (java.lang.String _iter2127 : struct.success) { - oprot.writeString(_iter2119); + oprot.writeString(_iter2127); } oprot.writeListEnd(); } @@ -227040,9 +227040,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter2120 : struct.success) + for (java.lang.String _iter2128 : struct.success) { - oprot.writeString(_iter2120); + oprot.writeString(_iter2128); } } } @@ -227054,13 +227054,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list2121 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list2121.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2122; - for (int _i2123 = 0; _i2123 < _list2121.size; ++_i2123) + org.apache.thrift.protocol.TList _list2129 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list2129.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2130; + for (int _i2131 = 0; _i2131 < _list2129.size; ++_i2131) { - _elem2122 = iprot.readString(); - struct.success.add(_elem2122); + _elem2130 = iprot.readString(); + struct.success.add(_elem2130); } } struct.setSuccessIsSet(true); @@ -234121,15 +234121,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_write_ids_to_mi case 2: // WRITE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map2124 = iprot.readMapBegin(); - struct.writeIds = 
new java.util.HashMap(2*_map2124.size); - @org.apache.thrift.annotation.Nullable java.lang.String _key2125; - long _val2126; - for (int _i2127 = 0; _i2127 < _map2124.size; ++_i2127) + org.apache.thrift.protocol.TMap _map2132 = iprot.readMapBegin(); + struct.writeIds = new java.util.HashMap(2*_map2132.size); + @org.apache.thrift.annotation.Nullable java.lang.String _key2133; + long _val2134; + for (int _i2135 = 0; _i2135 < _map2132.size; ++_i2135) { - _key2125 = iprot.readString(); - _val2126 = iprot.readI64(); - struct.writeIds.put(_key2125, _val2126); + _key2133 = iprot.readString(); + _val2134 = iprot.readI64(); + struct.writeIds.put(_key2133, _val2134); } iprot.readMapEnd(); } @@ -234158,10 +234158,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_write_ids_to_m oprot.writeFieldBegin(WRITE_IDS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64, struct.writeIds.size())); - for (java.util.Map.Entry _iter2128 : struct.writeIds.entrySet()) + for (java.util.Map.Entry _iter2136 : struct.writeIds.entrySet()) { - oprot.writeString(_iter2128.getKey()); - oprot.writeI64(_iter2128.getValue()); + oprot.writeString(_iter2136.getKey()); + oprot.writeI64(_iter2136.getValue()); } oprot.writeMapEnd(); } @@ -234198,10 +234198,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_write_ids_to_mi if (struct.isSetWriteIds()) { { oprot.writeI32(struct.writeIds.size()); - for (java.util.Map.Entry _iter2129 : struct.writeIds.entrySet()) + for (java.util.Map.Entry _iter2137 : struct.writeIds.entrySet()) { - oprot.writeString(_iter2129.getKey()); - oprot.writeI64(_iter2129.getValue()); + oprot.writeString(_iter2137.getKey()); + oprot.writeI64(_iter2137.getValue()); } } } @@ -234217,15 +234217,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_write_ids_to_min } if (incoming.get(1)) { { - org.apache.thrift.protocol.TMap _map2130 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64); - struct.writeIds = new java.util.HashMap(2*_map2130.size); - @org.apache.thrift.annotation.Nullable java.lang.String _key2131; - long _val2132; - for (int _i2133 = 0; _i2133 < _map2130.size; ++_i2133) + org.apache.thrift.protocol.TMap _map2138 = iprot.readMapBegin(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.I64); + struct.writeIds = new java.util.HashMap(2*_map2138.size); + @org.apache.thrift.annotation.Nullable java.lang.String _key2139; + long _val2140; + for (int _i2141 = 0; _i2141 < _map2138.size; ++_i2141) { - _key2131 = iprot.readString(); - _val2132 = iprot.readI64(); - struct.writeIds.put(_key2131, _val2132); + _key2139 = iprot.readString(); + _val2140 = iprot.readI64(); + struct.writeIds.put(_key2139, _val2140); } } struct.setWriteIdsIsSet(true); @@ -250222,13 +250222,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, find_columns_with_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2134 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list2134.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2135; - for (int _i2136 = 0; _i2136 < _list2134.size; ++_i2136) + org.apache.thrift.protocol.TList _list2142 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list2142.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2143; + for (int 
_i2144 = 0; _i2144 < _list2142.size; ++_i2144) { - _elem2135 = iprot.readString(); - struct.success.add(_elem2135); + _elem2143 = iprot.readString(); + struct.success.add(_elem2143); } iprot.readListEnd(); } @@ -250254,9 +250254,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, find_columns_with_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter2137 : struct.success) + for (java.lang.String _iter2145 : struct.success) { - oprot.writeString(_iter2137); + oprot.writeString(_iter2145); } oprot.writeListEnd(); } @@ -250287,9 +250287,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, find_columns_with_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter2138 : struct.success) + for (java.lang.String _iter2146 : struct.success) { - oprot.writeString(_iter2138); + oprot.writeString(_iter2146); } } } @@ -250301,13 +250301,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, find_columns_with_st java.util.BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list2139 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list2139.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2140; - for (int _i2141 = 0; _i2141 < _list2139.size; ++_i2141) + org.apache.thrift.protocol.TList _list2147 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list2147.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2148; + for (int _i2149 = 0; _i2149 < _list2147.size; ++_i2149) { - _elem2140 = iprot.readString(); - struct.success.add(_elem2140); + _elem2148 = iprot.readString(); + struct.success.add(_elem2148); } } struct.setSuccessIsSet(true); @@ -291289,14 +291289,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_all_vers case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2142 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list2142.size); - @org.apache.thrift.annotation.Nullable SchemaVersion _elem2143; - for (int _i2144 = 0; _i2144 < _list2142.size; ++_i2144) + org.apache.thrift.protocol.TList _list2150 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list2150.size); + @org.apache.thrift.annotation.Nullable SchemaVersion _elem2151; + for (int _i2152 = 0; _i2152 < _list2150.size; ++_i2152) { - _elem2143 = new SchemaVersion(); - _elem2143.read(iprot); - struct.success.add(_elem2143); + _elem2151 = new SchemaVersion(); + _elem2151.read(iprot); + struct.success.add(_elem2151); } iprot.readListEnd(); } @@ -291340,9 +291340,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_all_ver oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (SchemaVersion _iter2145 : struct.success) + for (SchemaVersion _iter2153 : struct.success) { - _iter2145.write(oprot); + _iter2153.write(oprot); } oprot.writeListEnd(); } @@ -291389,9 +291389,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_all_vers if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (SchemaVersion 
_iter2146 : struct.success) + for (SchemaVersion _iter2154 : struct.success) { - _iter2146.write(oprot); + _iter2154.write(oprot); } } } @@ -291409,14 +291409,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_all_versi java.util.BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list2147 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list2147.size); - @org.apache.thrift.annotation.Nullable SchemaVersion _elem2148; - for (int _i2149 = 0; _i2149 < _list2147.size; ++_i2149) + org.apache.thrift.protocol.TList _list2155 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list2155.size); + @org.apache.thrift.annotation.Nullable SchemaVersion _elem2156; + for (int _i2157 = 0; _i2157 < _list2155.size; ++_i2157) { - _elem2148 = new SchemaVersion(); - _elem2148.read(iprot); - struct.success.add(_elem2148); + _elem2156 = new SchemaVersion(); + _elem2156.read(iprot); + struct.success.add(_elem2156); } } struct.setSuccessIsSet(true); @@ -300031,14 +300031,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_runtime_stats_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2150 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list2150.size); - @org.apache.thrift.annotation.Nullable RuntimeStat _elem2151; - for (int _i2152 = 0; _i2152 < _list2150.size; ++_i2152) + org.apache.thrift.protocol.TList _list2158 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list2158.size); + @org.apache.thrift.annotation.Nullable RuntimeStat _elem2159; + for (int _i2160 = 0; _i2160 < _list2158.size; ++_i2160) { - _elem2151 = new RuntimeStat(); - _elem2151.read(iprot); - struct.success.add(_elem2151); + _elem2159 = new RuntimeStat(); + _elem2159.read(iprot); + struct.success.add(_elem2159); } iprot.readListEnd(); } @@ -300073,9 +300073,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_runtime_stats_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (RuntimeStat _iter2153 : struct.success) + for (RuntimeStat _iter2161 : struct.success) { - _iter2153.write(oprot); + _iter2161.write(oprot); } oprot.writeListEnd(); } @@ -300114,9 +300114,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (RuntimeStat _iter2154 : struct.success) + for (RuntimeStat _iter2162 : struct.success) { - _iter2154.write(oprot); + _iter2162.write(oprot); } } } @@ -300131,14 +300131,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_runtime_stats_re java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list2155 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list2155.size); - @org.apache.thrift.annotation.Nullable RuntimeStat _elem2156; - for (int _i2157 = 0; _i2157 < _list2155.size; ++_i2157) + org.apache.thrift.protocol.TList _list2163 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list2163.size); + @org.apache.thrift.annotation.Nullable RuntimeStat _elem2164; + for (int _i2165 = 0; _i2165 
< _list2163.size; ++_i2165) { - _elem2156 = new RuntimeStat(); - _elem2156.read(iprot); - struct.success.add(_elem2156); + _elem2164 = new RuntimeStat(); + _elem2164.read(iprot); + struct.success.add(_elem2164); } } struct.setSuccessIsSet(true); @@ -310249,13 +310249,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_stored_proc case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2158 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list2158.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2159; - for (int _i2160 = 0; _i2160 < _list2158.size; ++_i2160) + org.apache.thrift.protocol.TList _list2166 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list2166.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2167; + for (int _i2168 = 0; _i2168 < _list2166.size; ++_i2168) { - _elem2159 = iprot.readString(); - struct.success.add(_elem2159); + _elem2167 = iprot.readString(); + struct.success.add(_elem2167); } iprot.readListEnd(); } @@ -310290,9 +310290,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_stored_pro oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter2161 : struct.success) + for (java.lang.String _iter2169 : struct.success) { - oprot.writeString(_iter2161); + oprot.writeString(_iter2169); } oprot.writeListEnd(); } @@ -310331,9 +310331,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_stored_proc if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter2162 : struct.success) + for (java.lang.String _iter2170 : struct.success) { - oprot.writeString(_iter2162); + oprot.writeString(_iter2170); } } } @@ -310348,13 +310348,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_stored_proce java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list2163 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list2163.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2164; - for (int _i2165 = 0; _i2165 < _list2163.size; ++_i2165) + org.apache.thrift.protocol.TList _list2171 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list2171.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2172; + for (int _i2173 = 0; _i2173 < _list2171.size; ++_i2173) { - _elem2164 = iprot.readString(); - struct.success.add(_elem2164); + _elem2172 = iprot.readString(); + struct.success.add(_elem2172); } } struct.setSuccessIsSet(true); @@ -312811,13 +312811,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_packages_re case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2166 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list2166.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2167; - for (int _i2168 = 0; _i2168 < _list2166.size; ++_i2168) + org.apache.thrift.protocol.TList _list2174 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list2174.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2175; + for (int _i2176 
= 0; _i2176 < _list2174.size; ++_i2176) { - _elem2167 = iprot.readString(); - struct.success.add(_elem2167); + _elem2175 = iprot.readString(); + struct.success.add(_elem2175); } iprot.readListEnd(); } @@ -312852,9 +312852,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_packages_r oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (java.lang.String _iter2169 : struct.success) + for (java.lang.String _iter2177 : struct.success) { - oprot.writeString(_iter2169); + oprot.writeString(_iter2177); } oprot.writeListEnd(); } @@ -312893,9 +312893,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_packages_re if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (java.lang.String _iter2170 : struct.success) + for (java.lang.String _iter2178 : struct.success) { - oprot.writeString(_iter2170); + oprot.writeString(_iter2178); } } } @@ -312910,13 +312910,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_packages_res java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list2171 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); - struct.success = new java.util.ArrayList(_list2171.size); - @org.apache.thrift.annotation.Nullable java.lang.String _elem2172; - for (int _i2173 = 0; _i2173 < _list2171.size; ++_i2173) + org.apache.thrift.protocol.TList _list2179 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRING); + struct.success = new java.util.ArrayList(_list2179.size); + @org.apache.thrift.annotation.Nullable java.lang.String _elem2180; + for (int _i2181 = 0; _i2181 < _list2179.size; ++_i2181) { - _elem2172 = iprot.readString(); - struct.success.add(_elem2172); + _elem2180 = iprot.readString(); + struct.success.add(_elem2180); } } struct.setSuccessIsSet(true); @@ -314430,14 +314430,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_write_event case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list2174 = iprot.readListBegin(); - struct.success = new java.util.ArrayList(_list2174.size); - @org.apache.thrift.annotation.Nullable WriteEventInfo _elem2175; - for (int _i2176 = 0; _i2176 < _list2174.size; ++_i2176) + org.apache.thrift.protocol.TList _list2182 = iprot.readListBegin(); + struct.success = new java.util.ArrayList(_list2182.size); + @org.apache.thrift.annotation.Nullable WriteEventInfo _elem2183; + for (int _i2184 = 0; _i2184 < _list2182.size; ++_i2184) { - _elem2175 = new WriteEventInfo(); - _elem2175.read(iprot); - struct.success.add(_elem2175); + _elem2183 = new WriteEventInfo(); + _elem2183.read(iprot); + struct.success.add(_elem2183); } iprot.readListEnd(); } @@ -314472,9 +314472,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_write_even oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (WriteEventInfo _iter2177 : struct.success) + for (WriteEventInfo _iter2185 : struct.success) { - _iter2177.write(oprot); + _iter2185.write(oprot); } oprot.writeListEnd(); } @@ -314513,9 +314513,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_write_event if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (WriteEventInfo 
_iter2178 : struct.success) + for (WriteEventInfo _iter2186 : struct.success) { - _iter2178.write(oprot); + _iter2186.write(oprot); } } } @@ -314530,14 +314530,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_write_event_ java.util.BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list2179 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); - struct.success = new java.util.ArrayList(_list2179.size); - @org.apache.thrift.annotation.Nullable WriteEventInfo _elem2180; - for (int _i2181 = 0; _i2181 < _list2179.size; ++_i2181) + org.apache.thrift.protocol.TList _list2187 = iprot.readListBegin(org.apache.thrift.protocol.TType.STRUCT); + struct.success = new java.util.ArrayList(_list2187.size); + @org.apache.thrift.annotation.Nullable WriteEventInfo _elem2188; + for (int _i2189 = 0; _i2189 < _list2187.size; ++_i2189) { - _elem2180 = new WriteEventInfo(); - _elem2180.read(iprot); - struct.success.add(_elem2180); + _elem2188 = new WriteEventInfo(); + _elem2188.read(iprot); + struct.success.add(_elem2188); } } struct.setSuccessIsSet(true); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java index 776683b882f8..f5a102ab9647 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/hive_metastoreConstants.java @@ -97,6 +97,4 @@ public static final java.lang.String EXPECTED_PARAMETER_VALUE = "expected_parameter_value"; - public static final java.lang.String DEFAULT_TABLE_LOCATION = "defaultLocation"; - } diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ColumnStatistics.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ColumnStatistics.php index 0387862ec870..e5f9553ef95a 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ColumnStatistics.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ColumnStatistics.php @@ -64,7 +64,7 @@ class ColumnStatistics /** * @var string */ - public $engine = null; + public $engine = "hive"; public function __construct($vals = null) { diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetOpenTxnsRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetOpenTxnsRequest.php index 9b2db717d0f4..f34820d0501a 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetOpenTxnsRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetOpenTxnsRequest.php @@ -69,13 +69,13 @@ public function read($input) case 1: if ($ftype == TType::LST) { $this->excludeTxnTypes = array(); - $_size1339 = 0; - $_etype1342 = 0; - $xfer += $input->readListBegin($_etype1342, $_size1339); - for ($_i1343 = 0; $_i1343 < $_size1339; ++$_i1343) { - $elem1344 = null; - $xfer += $input->readI32($elem1344); - $this->excludeTxnTypes []= $elem1344; + $_size1346 = 0; + $_etype1349 = 0; + $xfer += $input->readListBegin($_etype1349, $_size1346); + for ($_i1350 = 0; $_i1350 < $_size1346; ++$_i1350) { + $elem1351 = null; + $xfer += $input->readI32($elem1351); + 
$this->excludeTxnTypes []= $elem1351; } $xfer += $input->readListEnd(); } else { @@ -102,8 +102,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('excludeTxnTypes', TType::LST, 1); $output->writeListBegin(TType::I32, count($this->excludeTxnTypes)); - foreach ($this->excludeTxnTypes as $iter1345) { - $xfer += $output->writeI32($iter1345); + foreach ($this->excludeTxnTypes as $iter1352) { + $xfer += $output->writeI32($iter1352); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetPartitionsByNamesRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetPartitionsByNamesRequest.php index c06aa5ac27e9..e21e5fa5ad98 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetPartitionsByNamesRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetPartitionsByNamesRequest.php @@ -123,7 +123,7 @@ class GetPartitionsByNamesRequest /** * @var string */ - public $engine = null; + public $engine = "hive"; /** * @var string */ diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetPartitionsPsWithAuthRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetPartitionsPsWithAuthRequest.php index 19c68368627e..80209821d954 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetPartitionsPsWithAuthRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetPartitionsPsWithAuthRequest.php @@ -89,6 +89,15 @@ class GetPartitionsPsWithAuthRequest 'isRequired' => false, 'type' => TType::STRING, ), + 13 => array( + 'var' => 'partNames', + 'isRequired' => false, + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), ); /** @@ -139,6 +148,10 @@ class GetPartitionsPsWithAuthRequest * @var string */ public $excludeParamKeyPattern = null; + /** + * @var string[] + */ + public $partNames = null; public function __construct($vals = null) { @@ -179,6 +192,9 @@ public function __construct($vals = null) if (isset($vals['excludeParamKeyPattern'])) { $this->excludeParamKeyPattern = $vals['excludeParamKeyPattern']; } + if (isset($vals['partNames'])) { + $this->partNames = $vals['partNames']; + } } } @@ -303,6 +319,22 @@ public function read($input) $xfer += $input->skip($ftype); } break; + case 13: + if ($ftype == TType::LST) { + $this->partNames = array(); + $_size1323 = 0; + $_etype1326 = 0; + $xfer += $input->readListBegin($_etype1326, $_size1323); + for ($_i1327 = 0; $_i1327 < $_size1323; ++$_i1327) { + $elem1328 = null; + $xfer += $input->readString($elem1328); + $this->partNames []= $elem1328; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -338,8 +370,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('partVals', TType::LST, 4); $output->writeListBegin(TType::STRING, count($this->partVals)); - foreach ($this->partVals as $iter1323) { - $xfer += $output->writeString($iter1323); + foreach ($this->partVals as $iter1329) { + $xfer += $output->writeString($iter1329); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); @@ -360,8 +392,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('groupNames', TType::LST, 7); $output->writeListBegin(TType::STRING, count($this->groupNames)); - foreach 
($this->groupNames as $iter1324) { - $xfer += $output->writeString($iter1324); + foreach ($this->groupNames as $iter1330) { + $xfer += $output->writeString($iter1330); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); @@ -391,6 +423,18 @@ public function write($output) $xfer += $output->writeString($this->excludeParamKeyPattern); $xfer += $output->writeFieldEnd(); } + if ($this->partNames !== null) { + if (!is_array($this->partNames)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('partNames', TType::LST, 13); + $output->writeListBegin(TType::STRING, count($this->partNames)); + foreach ($this->partNames as $iter1331) { + $xfer += $output->writeString($iter1331); + } + $output->writeListEnd(); + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetPartitionsPsWithAuthResponse.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetPartitionsPsWithAuthResponse.php index 3c8a7092cd90..f08a4fc4ea35 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetPartitionsPsWithAuthResponse.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetPartitionsPsWithAuthResponse.php @@ -69,14 +69,14 @@ public function read($input) case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size1325 = 0; - $_etype1328 = 0; - $xfer += $input->readListBegin($_etype1328, $_size1325); - for ($_i1329 = 0; $_i1329 < $_size1325; ++$_i1329) { - $elem1330 = null; - $elem1330 = new \metastore\Partition(); - $xfer += $elem1330->read($input); - $this->partitions []= $elem1330; + $_size1332 = 0; + $_etype1335 = 0; + $xfer += $input->readListBegin($_etype1335, $_size1332); + for ($_i1336 = 0; $_i1336 < $_size1332; ++$_i1336) { + $elem1337 = null; + $elem1337 = new \metastore\Partition(); + $xfer += $elem1337->read($input); + $this->partitions []= $elem1337; } $xfer += $input->readListEnd(); } else { @@ -103,8 +103,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('partitions', TType::LST, 1); $output->writeListBegin(TType::STRUCT, count($this->partitions)); - foreach ($this->partitions as $iter1331) { - $xfer += $iter1331->write($output); + foreach ($this->partitions as $iter1338) { + $xfer += $iter1338->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetTableRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetTableRequest.php index 3c56331bb0f5..217f5a377b29 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetTableRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/GetTableRequest.php @@ -113,7 +113,7 @@ class GetTableRequest /** * @var string */ - public $engine = null; + public $engine = "hive"; /** * @var int */ diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/PartitionsStatsRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/PartitionsStatsRequest.php index c7b3f053ae72..2c039313bd5b 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/PartitionsStatsRequest.php +++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/PartitionsStatsRequest.php @@ -61,7 +61,7 @@ class PartitionsStatsRequest ), 7 => array( 'var' => 'engine', - 'isRequired' => true, + 'isRequired' => false, 'type' => TType::STRING, ), ); @@ -93,7 +93,7 @@ class PartitionsStatsRequest /** * @var string */ - public $engine = null; + public $engine = "hive"; public function __construct($vals = null) { diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ReplicationMetricList.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ReplicationMetricList.php index 5c136ac01a8c..8926dcb1e65e 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ReplicationMetricList.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ReplicationMetricList.php @@ -69,14 +69,14 @@ public function read($input) case 1: if ($ftype == TType::LST) { $this->replicationMetricList = array(); - $_size1332 = 0; - $_etype1335 = 0; - $xfer += $input->readListBegin($_etype1335, $_size1332); - for ($_i1336 = 0; $_i1336 < $_size1332; ++$_i1336) { - $elem1337 = null; - $elem1337 = new \metastore\ReplicationMetrics(); - $xfer += $elem1337->read($input); - $this->replicationMetricList []= $elem1337; + $_size1339 = 0; + $_etype1342 = 0; + $xfer += $input->readListBegin($_etype1342, $_size1339); + for ($_i1343 = 0; $_i1343 < $_size1339; ++$_i1343) { + $elem1344 = null; + $elem1344 = new \metastore\ReplicationMetrics(); + $xfer += $elem1344->read($input); + $this->replicationMetricList []= $elem1344; } $xfer += $input->readListEnd(); } else { @@ -103,8 +103,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('replicationMetricList', TType::LST, 1); $output->writeListBegin(TType::STRUCT, count($this->replicationMetricList)); - foreach ($this->replicationMetricList as $iter1338) { - $xfer += $iter1338->write($output); + foreach ($this->replicationMetricList as $iter1345) { + $xfer += $iter1345->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/SetPartitionsStatsRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/SetPartitionsStatsRequest.php index dae3c80aa277..ca7bad0cfb24 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/SetPartitionsStatsRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/SetPartitionsStatsRequest.php @@ -48,7 +48,7 @@ class SetPartitionsStatsRequest ), 5 => array( 'var' => 'engine', - 'isRequired' => true, + 'isRequired' => false, 'type' => TType::STRING, ), ); @@ -72,7 +72,7 @@ class SetPartitionsStatsRequest /** * @var string */ - public $engine = null; + public $engine = "hive"; public function __construct($vals = null) { diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/TableStatsRequest.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/TableStatsRequest.php index 00c26651ff46..00d3ebb33d94 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/TableStatsRequest.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/TableStatsRequest.php @@ -52,7 +52,7 @@ class TableStatsRequest ), 6 => array( 'var' => 'engine', - 'isRequired' => true, + 'isRequired' => false, 'type' => TType::STRING, ), 7 => array( @@ -85,7 +85,7 @@ class 
TableStatsRequest /** * @var string */ - public $engine = null; + public $engine = "hive"; /** * @var int */ diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_add_partitions_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_add_partitions_args.php index a32966ffdc6f..1dc57b40b50a 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_add_partitions_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_add_partitions_args.php @@ -69,14 +69,14 @@ public function read($input) case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1530 = 0; - $_etype1533 = 0; - $xfer += $input->readListBegin($_etype1533, $_size1530); - for ($_i1534 = 0; $_i1534 < $_size1530; ++$_i1534) { - $elem1535 = null; - $elem1535 = new \metastore\Partition(); - $xfer += $elem1535->read($input); - $this->new_parts []= $elem1535; + $_size1537 = 0; + $_etype1540 = 0; + $xfer += $input->readListBegin($_etype1540, $_size1537); + for ($_i1541 = 0; $_i1541 < $_size1537; ++$_i1541) { + $elem1542 = null; + $elem1542 = new \metastore\Partition(); + $xfer += $elem1542->read($input); + $this->new_parts []= $elem1542; } $xfer += $input->readListEnd(); } else { @@ -103,8 +103,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('new_parts', TType::LST, 1); $output->writeListBegin(TType::STRUCT, count($this->new_parts)); - foreach ($this->new_parts as $iter1536) { - $xfer += $iter1536->write($output); + foreach ($this->new_parts as $iter1543) { + $xfer += $iter1543->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_add_partitions_pspec_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_add_partitions_pspec_args.php index 2bbda0ca9aee..80216cf42726 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_add_partitions_pspec_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_add_partitions_pspec_args.php @@ -69,14 +69,14 @@ public function read($input) case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1537 = 0; - $_etype1540 = 0; - $xfer += $input->readListBegin($_etype1540, $_size1537); - for ($_i1541 = 0; $_i1541 < $_size1537; ++$_i1541) { - $elem1542 = null; - $elem1542 = new \metastore\PartitionSpec(); - $xfer += $elem1542->read($input); - $this->new_parts []= $elem1542; + $_size1544 = 0; + $_etype1547 = 0; + $xfer += $input->readListBegin($_etype1547, $_size1544); + for ($_i1548 = 0; $_i1548 < $_size1544; ++$_i1548) { + $elem1549 = null; + $elem1549 = new \metastore\PartitionSpec(); + $xfer += $elem1549->read($input); + $this->new_parts []= $elem1549; } $xfer += $input->readListEnd(); } else { @@ -103,8 +103,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('new_parts', TType::LST, 1); $output->writeListBegin(TType::STRUCT, count($this->new_parts)); - foreach ($this->new_parts as $iter1543) { - $xfer += $iter1543->write($output); + foreach ($this->new_parts as $iter1550) { + $xfer += $iter1550->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_add_write_ids_to_min_history_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_add_write_ids_to_min_history_args.php index 8589337a8431..05df1f2cabdb 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_add_write_ids_to_min_history_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_add_write_ids_to_min_history_args.php @@ -91,16 +91,16 @@ public function read($input) case 2: if ($ftype == TType::MAP) { $this->writeIds = array(); - $_size1869 = 0; - $_ktype1870 = 0; - $_vtype1871 = 0; - $xfer += $input->readMapBegin($_ktype1870, $_vtype1871, $_size1869); - for ($_i1873 = 0; $_i1873 < $_size1869; ++$_i1873) { - $key1874 = ''; - $val1875 = 0; - $xfer += $input->readString($key1874); - $xfer += $input->readI64($val1875); - $this->writeIds[$key1874] = $val1875; + $_size1876 = 0; + $_ktype1877 = 0; + $_vtype1878 = 0; + $xfer += $input->readMapBegin($_ktype1877, $_vtype1878, $_size1876); + for ($_i1880 = 0; $_i1880 < $_size1876; ++$_i1880) { + $key1881 = ''; + $val1882 = 0; + $xfer += $input->readString($key1881); + $xfer += $input->readI64($val1882); + $this->writeIds[$key1881] = $val1882; } $xfer += $input->readMapEnd(); } else { @@ -132,9 +132,9 @@ public function write($output) } $xfer += $output->writeFieldBegin('writeIds', TType::MAP, 2); $output->writeMapBegin(TType::STRING, TType::I64, count($this->writeIds)); - foreach ($this->writeIds as $kiter1876 => $viter1877) { - $xfer += $output->writeString($kiter1876); - $xfer += $output->writeI64($viter1877); + foreach ($this->writeIds as $kiter1883 => $viter1884) { + $xfer += $output->writeString($kiter1883); + $xfer += $output->writeI64($viter1884); } $output->writeMapEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_alter_partitions_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_alter_partitions_args.php index 2dae7ba57dcd..c5b5c2a76af8 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_alter_partitions_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_alter_partitions_args.php @@ -107,14 +107,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1744 = 0; - $_etype1747 = 0; - $xfer += $input->readListBegin($_etype1747, $_size1744); - for ($_i1748 = 0; $_i1748 < $_size1744; ++$_i1748) { - $elem1749 = null; - $elem1749 = new \metastore\Partition(); - $xfer += $elem1749->read($input); - $this->new_parts []= $elem1749; + $_size1751 = 0; + $_etype1754 = 0; + $xfer += $input->readListBegin($_etype1754, $_size1751); + for ($_i1755 = 0; $_i1755 < $_size1751; ++$_i1755) { + $elem1756 = null; + $elem1756 = new \metastore\Partition(); + $xfer += $elem1756->read($input); + $this->new_parts []= $elem1756; } $xfer += $input->readListEnd(); } else { @@ -151,8 +151,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('new_parts', TType::LST, 3); $output->writeListBegin(TType::STRUCT, count($this->new_parts)); - foreach ($this->new_parts as $iter1750) { - $xfer += $iter1750->write($output); + foreach ($this->new_parts as $iter1757) { + $xfer += $iter1757->write($output); } 
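The regenerated PHP earlier in this hunk reflects Thrift IDL changes rather than hand edits: GetPartitionsPsWithAuthRequest gains an optional partNames string list at field id 13, and the numbered temporaries ($_size…, $elem…, $iter…) shift mechanically because the generator renumbers them for every struct emitted after the new field. Assuming the regenerated Java bean in org.apache.hadoop.hive.metastore.api picks up the same optional field with the conventional Thrift accessors (setPartNames and isSetPartNames are assumed here, not confirmed by this hunk), a caller could restrict the request to known partition names. A minimal sketch under that assumption:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.GetPartitionsPsWithAuthRequest;

    public class PartNamesRequestSketch {
      public static void main(String[] args) {
        // Thrift-generated beans expose a no-arg constructor plus per-field setters.
        GetPartitionsPsWithAuthRequest req = new GetPartitionsPsWithAuthRequest();
        req.setDbName("default");
        req.setTblName("web_logs");   // hypothetical table, for illustration only
        // New optional field 13: ask for specific partitions by name instead of
        // (or in addition to) partVals. Accessor name assumed from Thrift codegen rules.
        req.setPartNames(Arrays.asList("ds=2024-01-01", "ds=2024-01-02"));
        // Unset optional fields are simply not serialized; isSet* reports their state.
        System.out.println(req.isSetPartNames()); // true
      }
    }

Because the field is optional, clients built against the previous bindings continue to serialize the request unchanged; only readers that understand field 13 see the extra list.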
$output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_alter_partitions_with_environment_context_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_alter_partitions_with_environment_context_args.php index b8129f56cb9d..42f1fa4248a3 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_alter_partitions_with_environment_context_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_alter_partitions_with_environment_context_args.php @@ -120,14 +120,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size1751 = 0; - $_etype1754 = 0; - $xfer += $input->readListBegin($_etype1754, $_size1751); - for ($_i1755 = 0; $_i1755 < $_size1751; ++$_i1755) { - $elem1756 = null; - $elem1756 = new \metastore\Partition(); - $xfer += $elem1756->read($input); - $this->new_parts []= $elem1756; + $_size1758 = 0; + $_etype1761 = 0; + $xfer += $input->readListBegin($_etype1761, $_size1758); + for ($_i1762 = 0; $_i1762 < $_size1758; ++$_i1762) { + $elem1763 = null; + $elem1763 = new \metastore\Partition(); + $xfer += $elem1763->read($input); + $this->new_parts []= $elem1763; } $xfer += $input->readListEnd(); } else { @@ -172,8 +172,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('new_parts', TType::LST, 3); $output->writeListBegin(TType::STRUCT, count($this->new_parts)); - foreach ($this->new_parts as $iter1757) { - $xfer += $iter1757->write($output); + foreach ($this->new_parts as $iter1764) { + $xfer += $iter1764->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_append_partition_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_append_partition_args.php index f20598fc6701..d8c58f4e318b 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_append_partition_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_append_partition_args.php @@ -106,13 +106,13 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1544 = 0; - $_etype1547 = 0; - $xfer += $input->readListBegin($_etype1547, $_size1544); - for ($_i1548 = 0; $_i1548 < $_size1544; ++$_i1548) { - $elem1549 = null; - $xfer += $input->readString($elem1549); - $this->part_vals []= $elem1549; + $_size1551 = 0; + $_etype1554 = 0; + $xfer += $input->readListBegin($_etype1554, $_size1551); + for ($_i1555 = 0; $_i1555 < $_size1551; ++$_i1555) { + $elem1556 = null; + $xfer += $input->readString($elem1556); + $this->part_vals []= $elem1556; } $xfer += $input->readListEnd(); } else { @@ -149,8 +149,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('part_vals', TType::LST, 3); $output->writeListBegin(TType::STRING, count($this->part_vals)); - foreach ($this->part_vals as $iter1550) { - $xfer += $output->writeString($iter1550); + foreach ($this->part_vals as $iter1557) { + $xfer += $output->writeString($iter1557); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_append_partition_with_environment_context_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_append_partition_with_environment_context_args.php index b2e1ffbe9e34..c447067cc283 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_append_partition_with_environment_context_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_append_partition_with_environment_context_args.php @@ -119,13 +119,13 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1551 = 0; - $_etype1554 = 0; - $xfer += $input->readListBegin($_etype1554, $_size1551); - for ($_i1555 = 0; $_i1555 < $_size1551; ++$_i1555) { - $elem1556 = null; - $xfer += $input->readString($elem1556); - $this->part_vals []= $elem1556; + $_size1558 = 0; + $_etype1561 = 0; + $xfer += $input->readListBegin($_etype1561, $_size1558); + for ($_i1562 = 0; $_i1562 < $_size1558; ++$_i1562) { + $elem1563 = null; + $xfer += $input->readString($elem1563); + $this->part_vals []= $elem1563; } $xfer += $input->readListEnd(); } else { @@ -170,8 +170,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('part_vals', TType::LST, 3); $output->writeListBegin(TType::STRING, count($this->part_vals)); - foreach ($this->part_vals as $iter1557) { - $xfer += $output->writeString($iter1557); + foreach ($this->part_vals as $iter1564) { + $xfer += $output->writeString($iter1564); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_create_table_with_constraints_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_create_table_with_constraints_args.php index 6a77f79145d1..dace4a5a191c 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_create_table_with_constraints_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_create_table_with_constraints_args.php @@ -175,14 +175,14 @@ public function read($input) case 2: if ($ftype == TType::LST) { $this->primaryKeys = array(); - $_size1404 = 0; - $_etype1407 = 0; - $xfer += $input->readListBegin($_etype1407, $_size1404); - for ($_i1408 = 0; $_i1408 < $_size1404; ++$_i1408) { - $elem1409 = null; - $elem1409 = new \metastore\SQLPrimaryKey(); - $xfer += $elem1409->read($input); - $this->primaryKeys []= $elem1409; + $_size1411 = 0; + $_etype1414 = 0; + $xfer += $input->readListBegin($_etype1414, $_size1411); + for ($_i1415 = 0; $_i1415 < $_size1411; ++$_i1415) { + $elem1416 = null; + $elem1416 = new \metastore\SQLPrimaryKey(); + $xfer += $elem1416->read($input); + $this->primaryKeys []= $elem1416; } $xfer += $input->readListEnd(); } else { @@ -192,14 +192,14 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->foreignKeys = array(); - $_size1410 = 0; - $_etype1413 = 0; - $xfer += $input->readListBegin($_etype1413, $_size1410); - for ($_i1414 = 0; $_i1414 < $_size1410; ++$_i1414) { - $elem1415 = null; - $elem1415 = new \metastore\SQLForeignKey(); - $xfer += $elem1415->read($input); - $this->foreignKeys []= $elem1415; + $_size1417 = 0; + $_etype1420 = 0; + $xfer += $input->readListBegin($_etype1420, $_size1417); + for ($_i1421 = 0; $_i1421 < 
$_size1417; ++$_i1421) { + $elem1422 = null; + $elem1422 = new \metastore\SQLForeignKey(); + $xfer += $elem1422->read($input); + $this->foreignKeys []= $elem1422; } $xfer += $input->readListEnd(); } else { @@ -209,14 +209,14 @@ public function read($input) case 4: if ($ftype == TType::LST) { $this->uniqueConstraints = array(); - $_size1416 = 0; - $_etype1419 = 0; - $xfer += $input->readListBegin($_etype1419, $_size1416); - for ($_i1420 = 0; $_i1420 < $_size1416; ++$_i1420) { - $elem1421 = null; - $elem1421 = new \metastore\SQLUniqueConstraint(); - $xfer += $elem1421->read($input); - $this->uniqueConstraints []= $elem1421; + $_size1423 = 0; + $_etype1426 = 0; + $xfer += $input->readListBegin($_etype1426, $_size1423); + for ($_i1427 = 0; $_i1427 < $_size1423; ++$_i1427) { + $elem1428 = null; + $elem1428 = new \metastore\SQLUniqueConstraint(); + $xfer += $elem1428->read($input); + $this->uniqueConstraints []= $elem1428; } $xfer += $input->readListEnd(); } else { @@ -226,14 +226,14 @@ public function read($input) case 5: if ($ftype == TType::LST) { $this->notNullConstraints = array(); - $_size1422 = 0; - $_etype1425 = 0; - $xfer += $input->readListBegin($_etype1425, $_size1422); - for ($_i1426 = 0; $_i1426 < $_size1422; ++$_i1426) { - $elem1427 = null; - $elem1427 = new \metastore\SQLNotNullConstraint(); - $xfer += $elem1427->read($input); - $this->notNullConstraints []= $elem1427; + $_size1429 = 0; + $_etype1432 = 0; + $xfer += $input->readListBegin($_etype1432, $_size1429); + for ($_i1433 = 0; $_i1433 < $_size1429; ++$_i1433) { + $elem1434 = null; + $elem1434 = new \metastore\SQLNotNullConstraint(); + $xfer += $elem1434->read($input); + $this->notNullConstraints []= $elem1434; } $xfer += $input->readListEnd(); } else { @@ -243,14 +243,14 @@ public function read($input) case 6: if ($ftype == TType::LST) { $this->defaultConstraints = array(); - $_size1428 = 0; - $_etype1431 = 0; - $xfer += $input->readListBegin($_etype1431, $_size1428); - for ($_i1432 = 0; $_i1432 < $_size1428; ++$_i1432) { - $elem1433 = null; - $elem1433 = new \metastore\SQLDefaultConstraint(); - $xfer += $elem1433->read($input); - $this->defaultConstraints []= $elem1433; + $_size1435 = 0; + $_etype1438 = 0; + $xfer += $input->readListBegin($_etype1438, $_size1435); + for ($_i1439 = 0; $_i1439 < $_size1435; ++$_i1439) { + $elem1440 = null; + $elem1440 = new \metastore\SQLDefaultConstraint(); + $xfer += $elem1440->read($input); + $this->defaultConstraints []= $elem1440; } $xfer += $input->readListEnd(); } else { @@ -260,14 +260,14 @@ public function read($input) case 7: if ($ftype == TType::LST) { $this->checkConstraints = array(); - $_size1434 = 0; - $_etype1437 = 0; - $xfer += $input->readListBegin($_etype1437, $_size1434); - for ($_i1438 = 0; $_i1438 < $_size1434; ++$_i1438) { - $elem1439 = null; - $elem1439 = new \metastore\SQLCheckConstraint(); - $xfer += $elem1439->read($input); - $this->checkConstraints []= $elem1439; + $_size1441 = 0; + $_etype1444 = 0; + $xfer += $input->readListBegin($_etype1444, $_size1441); + for ($_i1445 = 0; $_i1445 < $_size1441; ++$_i1445) { + $elem1446 = null; + $elem1446 = new \metastore\SQLCheckConstraint(); + $xfer += $elem1446->read($input); + $this->checkConstraints []= $elem1446; } $xfer += $input->readListEnd(); } else { @@ -302,8 +302,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('primaryKeys', TType::LST, 2); $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); - foreach ($this->primaryKeys as $iter1440) { - $xfer += $iter1440->write($output); + 
foreach ($this->primaryKeys as $iter1447) { + $xfer += $iter1447->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); @@ -314,8 +314,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('foreignKeys', TType::LST, 3); $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); - foreach ($this->foreignKeys as $iter1441) { - $xfer += $iter1441->write($output); + foreach ($this->foreignKeys as $iter1448) { + $xfer += $iter1448->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); @@ -326,8 +326,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('uniqueConstraints', TType::LST, 4); $output->writeListBegin(TType::STRUCT, count($this->uniqueConstraints)); - foreach ($this->uniqueConstraints as $iter1442) { - $xfer += $iter1442->write($output); + foreach ($this->uniqueConstraints as $iter1449) { + $xfer += $iter1449->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); @@ -338,8 +338,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('notNullConstraints', TType::LST, 5); $output->writeListBegin(TType::STRUCT, count($this->notNullConstraints)); - foreach ($this->notNullConstraints as $iter1443) { - $xfer += $iter1443->write($output); + foreach ($this->notNullConstraints as $iter1450) { + $xfer += $iter1450->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); @@ -350,8 +350,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('defaultConstraints', TType::LST, 6); $output->writeListBegin(TType::STRUCT, count($this->defaultConstraints)); - foreach ($this->defaultConstraints as $iter1444) { - $xfer += $iter1444->write($output); + foreach ($this->defaultConstraints as $iter1451) { + $xfer += $iter1451->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); @@ -362,8 +362,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('checkConstraints', TType::LST, 7); $output->writeListBegin(TType::STRUCT, count($this->checkConstraints)); - foreach ($this->checkConstraints as $iter1445) { - $xfer += $iter1445->write($output); + foreach ($this->checkConstraints as $iter1452) { + $xfer += $iter1452->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_drop_partition_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_drop_partition_args.php index af48d9f1470d..f34eaae559d2 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_drop_partition_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_drop_partition_args.php @@ -118,13 +118,13 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1558 = 0; - $_etype1561 = 0; - $xfer += $input->readListBegin($_etype1561, $_size1558); - for ($_i1562 = 0; $_i1562 < $_size1558; ++$_i1562) { - $elem1563 = null; - $xfer += $input->readString($elem1563); - $this->part_vals []= $elem1563; + $_size1565 = 0; + $_etype1568 = 0; + $xfer += $input->readListBegin($_etype1568, $_size1565); + for ($_i1569 = 0; $_i1569 < $_size1565; ++$_i1569) { + $elem1570 = null; + $xfer += $input->readString($elem1570); + $this->part_vals []= $elem1570; } $xfer += $input->readListEnd(); } else { @@ -168,8 +168,8 @@ public function write($output) } 
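A second recurring change in this hunk is the engine field on the statistics and table request structs (ColumnStatistics, GetTableRequest, PartitionsStatsRequest, SetPartitionsStatsRequest, TableStatsRequest): it drops from required to optional and picks up a "hive" default in the generated PHP. Assuming the Java beans regenerate the same way, callers that never set an engine keep working and simply operate under the default engine. A minimal sketch, with the default value assumed to mirror the PHP output shown above:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.TableStatsRequest;

    public class EngineDefaultSketch {
      public static void main(String[] args) {
        TableStatsRequest req = new TableStatsRequest();
        req.setDbName("default");
        req.setTblName("web_logs");                     // hypothetical table
        req.setColNames(Arrays.asList("ip", "bytes_sent"));
        // engine is no longer required; if the Java default mirrors the generated
        // PHP it starts out as "hive", so this print is expected (not guaranteed)
        // to show the default rather than null.
        System.out.println(req.getEngine());
        req.setEngine("impala");                        // override only when targeting another engine
      }
    }

Relaxing the field from required to optional is what keeps older clients compatible: a missing engine no longer fails Thrift validation, it just falls back to the default.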
$xfer += $output->writeFieldBegin('part_vals', TType::LST, 3); $output->writeListBegin(TType::STRING, count($this->part_vals)); - foreach ($this->part_vals as $iter1564) { - $xfer += $output->writeString($iter1564); + foreach ($this->part_vals as $iter1571) { + $xfer += $output->writeString($iter1571); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_drop_partition_with_environment_context_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_drop_partition_with_environment_context_args.php index 6d436e6d5d2f..9049d55a59a6 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_drop_partition_with_environment_context_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_drop_partition_with_environment_context_args.php @@ -131,13 +131,13 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1565 = 0; - $_etype1568 = 0; - $xfer += $input->readListBegin($_etype1568, $_size1565); - for ($_i1569 = 0; $_i1569 < $_size1565; ++$_i1569) { - $elem1570 = null; - $xfer += $input->readString($elem1570); - $this->part_vals []= $elem1570; + $_size1572 = 0; + $_etype1575 = 0; + $xfer += $input->readListBegin($_etype1575, $_size1572); + for ($_i1576 = 0; $_i1576 < $_size1572; ++$_i1576) { + $elem1577 = null; + $xfer += $input->readString($elem1577); + $this->part_vals []= $elem1577; } $xfer += $input->readListEnd(); } else { @@ -189,8 +189,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('part_vals', TType::LST, 3); $output->writeListBegin(TType::STRING, count($this->part_vals)); - foreach ($this->part_vals as $iter1571) { - $xfer += $output->writeString($iter1571); + foreach ($this->part_vals as $iter1578) { + $xfer += $output->writeString($iter1578); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_exchange_partition_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_exchange_partition_args.php index 226f156cb6f4..5b38432184e8 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_exchange_partition_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_exchange_partition_args.php @@ -120,16 +120,16 @@ public function read($input) case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1579 = 0; - $_ktype1580 = 0; - $_vtype1581 = 0; - $xfer += $input->readMapBegin($_ktype1580, $_vtype1581, $_size1579); - for ($_i1583 = 0; $_i1583 < $_size1579; ++$_i1583) { - $key1584 = ''; - $val1585 = ''; - $xfer += $input->readString($key1584); - $xfer += $input->readString($val1585); - $this->partitionSpecs[$key1584] = $val1585; + $_size1586 = 0; + $_ktype1587 = 0; + $_vtype1588 = 0; + $xfer += $input->readMapBegin($_ktype1587, $_vtype1588, $_size1586); + for ($_i1590 = 0; $_i1590 < $_size1586; ++$_i1590) { + $key1591 = ''; + $val1592 = ''; + $xfer += $input->readString($key1591); + $xfer += $input->readString($val1592); + $this->partitionSpecs[$key1591] = $val1592; } $xfer += $input->readMapEnd(); } else { @@ -184,9 +184,9 @@ public function write($output) } $xfer += 
$output->writeFieldBegin('partitionSpecs', TType::MAP, 1); $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); - foreach ($this->partitionSpecs as $kiter1586 => $viter1587) { - $xfer += $output->writeString($kiter1586); - $xfer += $output->writeString($viter1587); + foreach ($this->partitionSpecs as $kiter1593 => $viter1594) { + $xfer += $output->writeString($kiter1593); + $xfer += $output->writeString($viter1594); } $output->writeMapEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_exchange_partitions_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_exchange_partitions_args.php index 582a0668db2d..75f568fb8f8d 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_exchange_partitions_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_exchange_partitions_args.php @@ -120,16 +120,16 @@ public function read($input) case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size1588 = 0; - $_ktype1589 = 0; - $_vtype1590 = 0; - $xfer += $input->readMapBegin($_ktype1589, $_vtype1590, $_size1588); - for ($_i1592 = 0; $_i1592 < $_size1588; ++$_i1592) { - $key1593 = ''; - $val1594 = ''; - $xfer += $input->readString($key1593); - $xfer += $input->readString($val1594); - $this->partitionSpecs[$key1593] = $val1594; + $_size1595 = 0; + $_ktype1596 = 0; + $_vtype1597 = 0; + $xfer += $input->readMapBegin($_ktype1596, $_vtype1597, $_size1595); + for ($_i1599 = 0; $_i1599 < $_size1595; ++$_i1599) { + $key1600 = ''; + $val1601 = ''; + $xfer += $input->readString($key1600); + $xfer += $input->readString($val1601); + $this->partitionSpecs[$key1600] = $val1601; } $xfer += $input->readMapEnd(); } else { @@ -184,9 +184,9 @@ public function write($output) } $xfer += $output->writeFieldBegin('partitionSpecs', TType::MAP, 1); $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); - foreach ($this->partitionSpecs as $kiter1595 => $viter1596) { - $xfer += $output->writeString($kiter1595); - $xfer += $output->writeString($viter1596); + foreach ($this->partitionSpecs as $kiter1602 => $viter1603) { + $xfer += $output->writeString($kiter1602); + $xfer += $output->writeString($viter1603); } $output->writeMapEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_exchange_partitions_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_exchange_partitions_result.php index aa6f6904c5d6..95e90156f504 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_exchange_partitions_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_exchange_partitions_result.php @@ -121,14 +121,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1597 = 0; - $_etype1600 = 0; - $xfer += $input->readListBegin($_etype1600, $_size1597); - for ($_i1601 = 0; $_i1601 < $_size1597; ++$_i1601) { - $elem1602 = null; - $elem1602 = new \metastore\Partition(); - $xfer += $elem1602->read($input); - $this->success []= $elem1602; + $_size1604 = 0; + $_etype1607 = 0; + $xfer += $input->readListBegin($_etype1607, $_size1604); + for ($_i1608 = 0; 
$_i1608 < $_size1604; ++$_i1608) { + $elem1609 = null; + $elem1609 = new \metastore\Partition(); + $xfer += $elem1609->read($input); + $this->success []= $elem1609; } $xfer += $input->readListEnd(); } else { @@ -187,8 +187,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1603) { - $xfer += $iter1603->write($output); + foreach ($this->success as $iter1610) { + $xfer += $iter1610->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_find_columns_with_stats_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_find_columns_with_stats_result.php index bf71f8bad539..6d627c4a27a8 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_find_columns_with_stats_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_find_columns_with_stats_result.php @@ -68,13 +68,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1878 = 0; - $_etype1881 = 0; - $xfer += $input->readListBegin($_etype1881, $_size1878); - for ($_i1882 = 0; $_i1882 < $_size1878; ++$_i1882) { - $elem1883 = null; - $xfer += $input->readString($elem1883); - $this->success []= $elem1883; + $_size1885 = 0; + $_etype1888 = 0; + $xfer += $input->readListBegin($_etype1888, $_size1885); + for ($_i1889 = 0; $_i1889 < $_size1885; ++$_i1889) { + $elem1890 = null; + $xfer += $input->readString($elem1890); + $this->success []= $elem1890; } $xfer += $input->readListEnd(); } else { @@ -101,8 +101,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1884) { - $xfer += $output->writeString($iter1884); + foreach ($this->success as $iter1891) { + $xfer += $output->writeString($iter1891); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_databases_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_databases_result.php index e52138309bef..a5860860e68a 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_databases_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_databases_result.php @@ -81,13 +81,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1353 = 0; - $_etype1356 = 0; - $xfer += $input->readListBegin($_etype1356, $_size1353); - for ($_i1357 = 0; $_i1357 < $_size1353; ++$_i1357) { - $elem1358 = null; - $xfer += $input->readString($elem1358); - $this->success []= $elem1358; + $_size1360 = 0; + $_etype1363 = 0; + $xfer += $input->readListBegin($_etype1363, $_size1360); + for ($_i1364 = 0; $_i1364 < $_size1360; ++$_i1364) { + $elem1365 = null; + $xfer += $input->readString($elem1365); + $this->success []= $elem1365; } $xfer += $input->readListEnd(); } else { @@ -122,8 +122,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); 
$output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1359) { - $xfer += $output->writeString($iter1359); + foreach ($this->success as $iter1366) { + $xfer += $output->writeString($iter1366); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_materialized_view_objects_for_rewriting_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_materialized_view_objects_for_rewriting_result.php index 464a804f21d9..ab6bc80c5919 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_materialized_view_objects_for_rewriting_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_materialized_view_objects_for_rewriting_result.php @@ -82,14 +82,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1467 = 0; - $_etype1470 = 0; - $xfer += $input->readListBegin($_etype1470, $_size1467); - for ($_i1471 = 0; $_i1471 < $_size1467; ++$_i1471) { - $elem1472 = null; - $elem1472 = new \metastore\Table(); - $xfer += $elem1472->read($input); - $this->success []= $elem1472; + $_size1474 = 0; + $_etype1477 = 0; + $xfer += $input->readListBegin($_etype1477, $_size1474); + for ($_i1478 = 0; $_i1478 < $_size1474; ++$_i1478) { + $elem1479 = null; + $elem1479 = new \metastore\Table(); + $xfer += $elem1479->read($input); + $this->success []= $elem1479; } $xfer += $input->readListEnd(); } else { @@ -124,8 +124,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1473) { - $xfer += $iter1473->write($output); + foreach ($this->success as $iter1480) { + $xfer += $iter1480->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_packages_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_packages_result.php index c14aeda7dc91..0be3936c19e4 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_packages_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_packages_result.php @@ -81,13 +81,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1906 = 0; - $_etype1909 = 0; - $xfer += $input->readListBegin($_etype1909, $_size1906); - for ($_i1910 = 0; $_i1910 < $_size1906; ++$_i1910) { - $elem1911 = null; - $xfer += $input->readString($elem1911); - $this->success []= $elem1911; + $_size1913 = 0; + $_etype1916 = 0; + $xfer += $input->readListBegin($_etype1916, $_size1913); + for ($_i1917 = 0; $_i1917 < $_size1913; ++$_i1917) { + $elem1918 = null; + $xfer += $input->readString($elem1918); + $this->success []= $elem1918; } $xfer += $input->readListEnd(); } else { @@ -122,8 +122,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1912) { - $xfer += $output->writeString($iter1912); + foreach ($this->success as 
$iter1919) { + $xfer += $output->writeString($iter1919); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_stored_procedures_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_stored_procedures_result.php index b6c42bf25aea..3bf12bef1c9a 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_stored_procedures_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_stored_procedures_result.php @@ -81,13 +81,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1899 = 0; - $_etype1902 = 0; - $xfer += $input->readListBegin($_etype1902, $_size1899); - for ($_i1903 = 0; $_i1903 < $_size1899; ++$_i1903) { - $elem1904 = null; - $xfer += $input->readString($elem1904); - $this->success []= $elem1904; + $_size1906 = 0; + $_etype1909 = 0; + $xfer += $input->readListBegin($_etype1909, $_size1906); + for ($_i1910 = 0; $_i1910 < $_size1906; ++$_i1910) { + $elem1911 = null; + $xfer += $input->readString($elem1911); + $this->success []= $elem1911; } $xfer += $input->readListEnd(); } else { @@ -122,8 +122,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1905) { - $xfer += $output->writeString($iter1905); + foreach ($this->success as $iter1912) { + $xfer += $output->writeString($iter1912); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_tables_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_tables_result.php index 8a9a245ee734..220515f5d1d2 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_tables_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_tables_result.php @@ -81,13 +81,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1495 = 0; - $_etype1498 = 0; - $xfer += $input->readListBegin($_etype1498, $_size1495); - for ($_i1499 = 0; $_i1499 < $_size1495; ++$_i1499) { - $elem1500 = null; - $xfer += $input->readString($elem1500); - $this->success []= $elem1500; + $_size1502 = 0; + $_etype1505 = 0; + $xfer += $input->readListBegin($_etype1505, $_size1502); + for ($_i1506 = 0; $_i1506 < $_size1502; ++$_i1506) { + $elem1507 = null; + $xfer += $input->readString($elem1507); + $this->success []= $elem1507; } $xfer += $input->readListEnd(); } else { @@ -122,8 +122,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1501) { - $xfer += $output->writeString($iter1501); + foreach ($this->success as $iter1508) { + $xfer += $output->writeString($iter1508); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_token_identifiers_result.php 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_token_identifiers_result.php index fd3bdbf40935..09590993f1fd 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_token_identifiers_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_token_identifiers_result.php @@ -68,13 +68,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1855 = 0; - $_etype1858 = 0; - $xfer += $input->readListBegin($_etype1858, $_size1855); - for ($_i1859 = 0; $_i1859 < $_size1855; ++$_i1859) { - $elem1860 = null; - $xfer += $input->readString($elem1860); - $this->success []= $elem1860; + $_size1862 = 0; + $_etype1865 = 0; + $xfer += $input->readListBegin($_etype1865, $_size1862); + for ($_i1866 = 0; $_i1866 < $_size1862; ++$_i1866) { + $elem1867 = null; + $xfer += $input->readString($elem1867); + $this->success []= $elem1867; } $xfer += $input->readListEnd(); } else { @@ -101,8 +101,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1861) { - $xfer += $output->writeString($iter1861); + foreach ($this->success as $iter1868) { + $xfer += $output->writeString($iter1868); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_write_event_info_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_write_event_info_result.php index aa956ce7c99c..343de3e5128f 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_write_event_info_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_all_write_event_info_result.php @@ -82,14 +82,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1913 = 0; - $_etype1916 = 0; - $xfer += $input->readListBegin($_etype1916, $_size1913); - for ($_i1917 = 0; $_i1917 < $_size1913; ++$_i1917) { - $elem1918 = null; - $elem1918 = new \metastore\WriteEventInfo(); - $xfer += $elem1918->read($input); - $this->success []= $elem1918; + $_size1920 = 0; + $_etype1923 = 0; + $xfer += $input->readListBegin($_etype1923, $_size1920); + for ($_i1924 = 0; $_i1924 < $_size1920; ++$_i1924) { + $elem1925 = null; + $elem1925 = new \metastore\WriteEventInfo(); + $xfer += $elem1925->read($input); + $this->success []= $elem1925; } $xfer += $input->readListEnd(); } else { @@ -124,8 +124,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1919) { - $xfer += $iter1919->write($output); + foreach ($this->success as $iter1926) { + $xfer += $iter1926->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_databases_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_databases_result.php index cf6afd2c2395..a50a97c3be3c 100644 --- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_databases_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_databases_result.php @@ -81,13 +81,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1346 = 0; - $_etype1349 = 0; - $xfer += $input->readListBegin($_etype1349, $_size1346); - for ($_i1350 = 0; $_i1350 < $_size1346; ++$_i1350) { - $elem1351 = null; - $xfer += $input->readString($elem1351); - $this->success []= $elem1351; + $_size1353 = 0; + $_etype1356 = 0; + $xfer += $input->readListBegin($_etype1356, $_size1353); + for ($_i1357 = 0; $_i1357 < $_size1353; ++$_i1357) { + $elem1358 = null; + $xfer += $input->readString($elem1358); + $this->success []= $elem1358; } $xfer += $input->readListEnd(); } else { @@ -122,8 +122,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1352) { - $xfer += $output->writeString($iter1352); + foreach ($this->success as $iter1359) { + $xfer += $output->writeString($iter1359); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_dataconnectors_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_dataconnectors_result.php index 41b8faec9aac..9b3dcd5b1f5d 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_dataconnectors_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_dataconnectors_result.php @@ -81,13 +81,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1360 = 0; - $_etype1363 = 0; - $xfer += $input->readListBegin($_etype1363, $_size1360); - for ($_i1364 = 0; $_i1364 < $_size1360; ++$_i1364) { - $elem1365 = null; - $xfer += $input->readString($elem1365); - $this->success []= $elem1365; + $_size1367 = 0; + $_etype1370 = 0; + $xfer += $input->readListBegin($_etype1370, $_size1367); + for ($_i1371 = 0; $_i1371 < $_size1367; ++$_i1371) { + $elem1372 = null; + $xfer += $input->readString($elem1372); + $this->success []= $elem1372; } $xfer += $input->readListEnd(); } else { @@ -122,8 +122,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1366) { - $xfer += $output->writeString($iter1366); + foreach ($this->success as $iter1373) { + $xfer += $output->writeString($iter1373); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_fields_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_fields_result.php index 548467e4810e..a1d3bf985fc8 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_fields_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_fields_result.php @@ -108,14 +108,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1376 = 0; - 
$_etype1379 = 0; - $xfer += $input->readListBegin($_etype1379, $_size1376); - for ($_i1380 = 0; $_i1380 < $_size1376; ++$_i1380) { - $elem1381 = null; - $elem1381 = new \metastore\FieldSchema(); - $xfer += $elem1381->read($input); - $this->success []= $elem1381; + $_size1383 = 0; + $_etype1386 = 0; + $xfer += $input->readListBegin($_etype1386, $_size1383); + for ($_i1387 = 0; $_i1387 < $_size1383; ++$_i1387) { + $elem1388 = null; + $elem1388 = new \metastore\FieldSchema(); + $xfer += $elem1388->read($input); + $this->success []= $elem1388; } $xfer += $input->readListEnd(); } else { @@ -166,8 +166,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1382) { - $xfer += $iter1382->write($output); + foreach ($this->success as $iter1389) { + $xfer += $iter1389->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_fields_with_environment_context_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_fields_with_environment_context_result.php index 6e8331417d50..ac940fd26b67 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_fields_with_environment_context_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_fields_with_environment_context_result.php @@ -108,14 +108,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1383 = 0; - $_etype1386 = 0; - $xfer += $input->readListBegin($_etype1386, $_size1383); - for ($_i1387 = 0; $_i1387 < $_size1383; ++$_i1387) { - $elem1388 = null; - $elem1388 = new \metastore\FieldSchema(); - $xfer += $elem1388->read($input); - $this->success []= $elem1388; + $_size1390 = 0; + $_etype1393 = 0; + $xfer += $input->readListBegin($_etype1393, $_size1390); + for ($_i1394 = 0; $_i1394 < $_size1390; ++$_i1394) { + $elem1395 = null; + $elem1395 = new \metastore\FieldSchema(); + $xfer += $elem1395->read($input); + $this->success []= $elem1395; } $xfer += $input->readListEnd(); } else { @@ -166,8 +166,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1389) { - $xfer += $iter1389->write($output); + foreach ($this->success as $iter1396) { + $xfer += $iter1396->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_functions_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_functions_result.php index 7515fcd01b3c..72548b7497d8 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_functions_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_functions_result.php @@ -81,13 +81,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1806 = 0; - $_etype1809 = 0; - $xfer += $input->readListBegin($_etype1809, $_size1806); - for ($_i1810 = 0; $_i1810 < $_size1806; ++$_i1810) { - $elem1811 = null; - $xfer += 
$input->readString($elem1811); - $this->success []= $elem1811; + $_size1813 = 0; + $_etype1816 = 0; + $xfer += $input->readListBegin($_etype1816, $_size1813); + for ($_i1817 = 0; $_i1817 < $_size1813; ++$_i1817) { + $elem1818 = null; + $xfer += $input->readString($elem1818); + $this->success []= $elem1818; } $xfer += $input->readListEnd(); } else { @@ -122,8 +122,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1812) { - $xfer += $output->writeString($iter1812); + foreach ($this->success as $iter1819) { + $xfer += $output->writeString($iter1819); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_master_keys_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_master_keys_result.php index da066c60534b..f05f0168b04c 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_master_keys_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_master_keys_result.php @@ -68,13 +68,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1862 = 0; - $_etype1865 = 0; - $xfer += $input->readListBegin($_etype1865, $_size1862); - for ($_i1866 = 0; $_i1866 < $_size1862; ++$_i1866) { - $elem1867 = null; - $xfer += $input->readString($elem1867); - $this->success []= $elem1867; + $_size1869 = 0; + $_etype1872 = 0; + $xfer += $input->readListBegin($_etype1872, $_size1869); + for ($_i1873 = 0; $_i1873 < $_size1869; ++$_i1873) { + $elem1874 = null; + $xfer += $input->readString($elem1874); + $this->success []= $elem1874; } $xfer += $input->readListEnd(); } else { @@ -101,8 +101,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1868) { - $xfer += $output->writeString($iter1868); + foreach ($this->success as $iter1875) { + $xfer += $output->writeString($iter1875); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_materialized_views_for_rewriting_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_materialized_views_for_rewriting_result.php index e9c2964bc6a6..0e4bd2a7454b 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_materialized_views_for_rewriting_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_materialized_views_for_rewriting_result.php @@ -81,13 +81,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1474 = 0; - $_etype1477 = 0; - $xfer += $input->readListBegin($_etype1477, $_size1474); - for ($_i1478 = 0; $_i1478 < $_size1474; ++$_i1478) { - $elem1479 = null; - $xfer += $input->readString($elem1479); - $this->success []= $elem1479; + $_size1481 = 0; + $_etype1484 = 0; + $xfer += $input->readListBegin($_etype1484, $_size1481); + for ($_i1485 = 0; $_i1485 < $_size1481; ++$_i1485) { + $elem1486 = null; + $xfer += $input->readString($elem1486); + 
$this->success []= $elem1486; } $xfer += $input->readListEnd(); } else { @@ -122,8 +122,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1480) { - $xfer += $output->writeString($iter1480); + foreach ($this->success as $iter1487) { + $xfer += $output->writeString($iter1487); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_part_specs_by_filter_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_part_specs_by_filter_result.php index db79d88731f6..8b7e9b7c095e 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_part_specs_by_filter_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_part_specs_by_filter_result.php @@ -95,14 +95,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1723 = 0; - $_etype1726 = 0; - $xfer += $input->readListBegin($_etype1726, $_size1723); - for ($_i1727 = 0; $_i1727 < $_size1723; ++$_i1727) { - $elem1728 = null; - $elem1728 = new \metastore\PartitionSpec(); - $xfer += $elem1728->read($input); - $this->success []= $elem1728; + $_size1730 = 0; + $_etype1733 = 0; + $xfer += $input->readListBegin($_etype1733, $_size1730); + for ($_i1734 = 0; $_i1734 < $_size1730; ++$_i1734) { + $elem1735 = null; + $elem1735 = new \metastore\PartitionSpec(); + $xfer += $elem1735->read($input); + $this->success []= $elem1735; } $xfer += $input->readListEnd(); } else { @@ -145,8 +145,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1729) { - $xfer += $iter1729->write($output); + foreach ($this->success as $iter1736) { + $xfer += $iter1736->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_args.php index 1d43357ad595..f989a977cd45 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_args.php @@ -106,13 +106,13 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1572 = 0; - $_etype1575 = 0; - $xfer += $input->readListBegin($_etype1575, $_size1572); - for ($_i1576 = 0; $_i1576 < $_size1572; ++$_i1576) { - $elem1577 = null; - $xfer += $input->readString($elem1577); - $this->part_vals []= $elem1577; + $_size1579 = 0; + $_etype1582 = 0; + $xfer += $input->readListBegin($_etype1582, $_size1579); + for ($_i1583 = 0; $_i1583 < $_size1579; ++$_i1583) { + $elem1584 = null; + $xfer += $input->readString($elem1584); + $this->part_vals []= $elem1584; } $xfer += $input->readListEnd(); } else { @@ -149,8 +149,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('part_vals', TType::LST, 3); $output->writeListBegin(TType::STRING, count($this->part_vals)); - foreach 
($this->part_vals as $iter1578) { - $xfer += $output->writeString($iter1578); + foreach ($this->part_vals as $iter1585) { + $xfer += $output->writeString($iter1585); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_names_ps_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_names_ps_args.php index 5dfca92a5356..08591f2cb016 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_names_ps_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_names_ps_args.php @@ -118,13 +118,13 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1688 = 0; - $_etype1691 = 0; - $xfer += $input->readListBegin($_etype1691, $_size1688); - for ($_i1692 = 0; $_i1692 < $_size1688; ++$_i1692) { - $elem1693 = null; - $xfer += $input->readString($elem1693); - $this->part_vals []= $elem1693; + $_size1695 = 0; + $_etype1698 = 0; + $xfer += $input->readListBegin($_etype1698, $_size1695); + for ($_i1699 = 0; $_i1699 < $_size1695; ++$_i1699) { + $elem1700 = null; + $xfer += $input->readString($elem1700); + $this->part_vals []= $elem1700; } $xfer += $input->readListEnd(); } else { @@ -168,8 +168,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('part_vals', TType::LST, 3); $output->writeListBegin(TType::STRING, count($this->part_vals)); - foreach ($this->part_vals as $iter1694) { - $xfer += $output->writeString($iter1694); + foreach ($this->part_vals as $iter1701) { + $xfer += $output->writeString($iter1701); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_names_ps_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_names_ps_result.php index f6cac7f439f5..ec1b1405abc2 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_names_ps_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_names_ps_result.php @@ -94,13 +94,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1695 = 0; - $_etype1698 = 0; - $xfer += $input->readListBegin($_etype1698, $_size1695); - for ($_i1699 = 0; $_i1699 < $_size1695; ++$_i1699) { - $elem1700 = null; - $xfer += $input->readString($elem1700); - $this->success []= $elem1700; + $_size1702 = 0; + $_etype1705 = 0; + $xfer += $input->readListBegin($_etype1705, $_size1702); + for ($_i1706 = 0; $_i1706 < $_size1702; ++$_i1706) { + $elem1707 = null; + $xfer += $input->readString($elem1707); + $this->success []= $elem1707; } $xfer += $input->readListEnd(); } else { @@ -143,8 +143,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1701) { - $xfer += $output->writeString($iter1701); + foreach ($this->success as $iter1708) { + $xfer += $output->writeString($iter1708); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_names_req_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_names_req_result.php index 927ddd467d7b..4ff05c25c732 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_names_req_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_names_req_result.php @@ -94,13 +94,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1702 = 0; - $_etype1705 = 0; - $xfer += $input->readListBegin($_etype1705, $_size1702); - for ($_i1706 = 0; $_i1706 < $_size1702; ++$_i1706) { - $elem1707 = null; - $xfer += $input->readString($elem1707); - $this->success []= $elem1707; + $_size1709 = 0; + $_etype1712 = 0; + $xfer += $input->readListBegin($_etype1712, $_size1709); + for ($_i1713 = 0; $_i1713 < $_size1709; ++$_i1713) { + $elem1714 = null; + $xfer += $input->readString($elem1714); + $this->success []= $elem1714; } $xfer += $input->readListEnd(); } else { @@ -143,8 +143,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1708) { - $xfer += $output->writeString($iter1708); + foreach ($this->success as $iter1715) { + $xfer += $output->writeString($iter1715); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_names_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_names_result.php index a3f8299255ba..cdc873884469 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_names_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_names_result.php @@ -94,13 +94,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1646 = 0; - $_etype1649 = 0; - $xfer += $input->readListBegin($_etype1649, $_size1646); - for ($_i1650 = 0; $_i1650 < $_size1646; ++$_i1650) { - $elem1651 = null; - $xfer += $input->readString($elem1651); - $this->success []= $elem1651; + $_size1653 = 0; + $_etype1656 = 0; + $xfer += $input->readListBegin($_etype1656, $_size1653); + for ($_i1657 = 0; $_i1657 < $_size1653; ++$_i1657) { + $elem1658 = null; + $xfer += $input->readString($elem1658); + $this->success []= $elem1658; } $xfer += $input->readListEnd(); } else { @@ -143,8 +143,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1652) { - $xfer += $output->writeString($iter1652); + foreach ($this->success as $iter1659) { + $xfer += $output->writeString($iter1659); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_with_auth_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_with_auth_args.php index 8e79331510ae..8fb33e53d09f 100644 --- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_with_auth_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partition_with_auth_args.php @@ -134,13 +134,13 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1604 = 0; - $_etype1607 = 0; - $xfer += $input->readListBegin($_etype1607, $_size1604); - for ($_i1608 = 0; $_i1608 < $_size1604; ++$_i1608) { - $elem1609 = null; - $xfer += $input->readString($elem1609); - $this->part_vals []= $elem1609; + $_size1611 = 0; + $_etype1614 = 0; + $xfer += $input->readListBegin($_etype1614, $_size1611); + for ($_i1615 = 0; $_i1615 < $_size1611; ++$_i1615) { + $elem1616 = null; + $xfer += $input->readString($elem1616); + $this->part_vals []= $elem1616; } $xfer += $input->readListEnd(); } else { @@ -157,13 +157,13 @@ public function read($input) case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1610 = 0; - $_etype1613 = 0; - $xfer += $input->readListBegin($_etype1613, $_size1610); - for ($_i1614 = 0; $_i1614 < $_size1610; ++$_i1614) { - $elem1615 = null; - $xfer += $input->readString($elem1615); - $this->group_names []= $elem1615; + $_size1617 = 0; + $_etype1620 = 0; + $xfer += $input->readListBegin($_etype1620, $_size1617); + for ($_i1621 = 0; $_i1621 < $_size1617; ++$_i1621) { + $elem1622 = null; + $xfer += $input->readString($elem1622); + $this->group_names []= $elem1622; } $xfer += $input->readListEnd(); } else { @@ -200,8 +200,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('part_vals', TType::LST, 3); $output->writeListBegin(TType::STRING, count($this->part_vals)); - foreach ($this->part_vals as $iter1616) { - $xfer += $output->writeString($iter1616); + foreach ($this->part_vals as $iter1623) { + $xfer += $output->writeString($iter1623); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); @@ -217,8 +217,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('group_names', TType::LST, 5); $output->writeListBegin(TType::STRING, count($this->group_names)); - foreach ($this->group_names as $iter1617) { - $xfer += $output->writeString($iter1617); + foreach ($this->group_names as $iter1624) { + $xfer += $output->writeString($iter1624); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_by_filter_req_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_by_filter_req_result.php index accd7d41558b..e88cd7eb667f 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_by_filter_req_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_by_filter_req_result.php @@ -95,14 +95,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1716 = 0; - $_etype1719 = 0; - $xfer += $input->readListBegin($_etype1719, $_size1716); - for ($_i1720 = 0; $_i1720 < $_size1716; ++$_i1720) { - $elem1721 = null; - $elem1721 = new \metastore\Partition(); - $xfer += $elem1721->read($input); - $this->success []= $elem1721; + $_size1723 = 0; + $_etype1726 = 0; + $xfer += $input->readListBegin($_etype1726, $_size1723); + for ($_i1727 = 0; $_i1727 < $_size1723; ++$_i1727) { + 
$elem1728 = null; + $elem1728 = new \metastore\Partition(); + $xfer += $elem1728->read($input); + $this->success []= $elem1728; } $xfer += $input->readListEnd(); } else { @@ -145,8 +145,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1722) { - $xfer += $iter1722->write($output); + foreach ($this->success as $iter1729) { + $xfer += $iter1729->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_by_filter_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_by_filter_result.php index 3479e5db697f..538238196fd2 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_by_filter_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_by_filter_result.php @@ -95,14 +95,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1709 = 0; - $_etype1712 = 0; - $xfer += $input->readListBegin($_etype1712, $_size1709); - for ($_i1713 = 0; $_i1713 < $_size1709; ++$_i1713) { - $elem1714 = null; - $elem1714 = new \metastore\Partition(); - $xfer += $elem1714->read($input); - $this->success []= $elem1714; + $_size1716 = 0; + $_etype1719 = 0; + $xfer += $input->readListBegin($_etype1719, $_size1716); + for ($_i1720 = 0; $_i1720 < $_size1716; ++$_i1720) { + $elem1721 = null; + $elem1721 = new \metastore\Partition(); + $xfer += $elem1721->read($input); + $this->success []= $elem1721; } $xfer += $input->readListEnd(); } else { @@ -145,8 +145,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1715) { - $xfer += $iter1715->write($output); + foreach ($this->success as $iter1722) { + $xfer += $iter1722->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_by_names_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_by_names_args.php index e49950967509..e13c697443a4 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_by_names_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_by_names_args.php @@ -106,13 +106,13 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size1730 = 0; - $_etype1733 = 0; - $xfer += $input->readListBegin($_etype1733, $_size1730); - for ($_i1734 = 0; $_i1734 < $_size1730; ++$_i1734) { - $elem1735 = null; - $xfer += $input->readString($elem1735); - $this->names []= $elem1735; + $_size1737 = 0; + $_etype1740 = 0; + $xfer += $input->readListBegin($_etype1740, $_size1737); + for ($_i1741 = 0; $_i1741 < $_size1737; ++$_i1741) { + $elem1742 = null; + $xfer += $input->readString($elem1742); + $this->names []= $elem1742; } $xfer += $input->readListEnd(); } else { @@ -149,8 +149,8 @@ public function write($output) } $xfer += 
$output->writeFieldBegin('names', TType::LST, 3); $output->writeListBegin(TType::STRING, count($this->names)); - foreach ($this->names as $iter1736) { - $xfer += $output->writeString($iter1736); + foreach ($this->names as $iter1743) { + $xfer += $output->writeString($iter1743); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_by_names_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_by_names_result.php index 616ae5afaac1..3094d1016d23 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_by_names_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_by_names_result.php @@ -108,14 +108,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1737 = 0; - $_etype1740 = 0; - $xfer += $input->readListBegin($_etype1740, $_size1737); - for ($_i1741 = 0; $_i1741 < $_size1737; ++$_i1741) { - $elem1742 = null; - $elem1742 = new \metastore\Partition(); - $xfer += $elem1742->read($input); - $this->success []= $elem1742; + $_size1744 = 0; + $_etype1747 = 0; + $xfer += $input->readListBegin($_etype1747, $_size1744); + for ($_i1748 = 0; $_i1748 < $_size1744; ++$_i1748) { + $elem1749 = null; + $elem1749 = new \metastore\Partition(); + $xfer += $elem1749->read($input); + $this->success []= $elem1749; } $xfer += $input->readListEnd(); } else { @@ -166,8 +166,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1743) { - $xfer += $iter1743->write($output); + foreach ($this->success as $iter1750) { + $xfer += $iter1750->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_ps_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_ps_args.php index b51b393bf67c..e1e17f875ea1 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_ps_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_ps_args.php @@ -118,13 +118,13 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1653 = 0; - $_etype1656 = 0; - $xfer += $input->readListBegin($_etype1656, $_size1653); - for ($_i1657 = 0; $_i1657 < $_size1653; ++$_i1657) { - $elem1658 = null; - $xfer += $input->readString($elem1658); - $this->part_vals []= $elem1658; + $_size1660 = 0; + $_etype1663 = 0; + $xfer += $input->readListBegin($_etype1663, $_size1660); + for ($_i1664 = 0; $_i1664 < $_size1660; ++$_i1664) { + $elem1665 = null; + $xfer += $input->readString($elem1665); + $this->part_vals []= $elem1665; } $xfer += $input->readListEnd(); } else { @@ -168,8 +168,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('part_vals', TType::LST, 3); $output->writeListBegin(TType::STRING, count($this->part_vals)); - foreach ($this->part_vals as $iter1659) { - $xfer += $output->writeString($iter1659); + foreach ($this->part_vals as $iter1666) { + $xfer += 
$output->writeString($iter1666); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_ps_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_ps_result.php index a4751d3ba9f0..685bd5a914da 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_ps_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_ps_result.php @@ -95,14 +95,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1660 = 0; - $_etype1663 = 0; - $xfer += $input->readListBegin($_etype1663, $_size1660); - for ($_i1664 = 0; $_i1664 < $_size1660; ++$_i1664) { - $elem1665 = null; - $elem1665 = new \metastore\Partition(); - $xfer += $elem1665->read($input); - $this->success []= $elem1665; + $_size1667 = 0; + $_etype1670 = 0; + $xfer += $input->readListBegin($_etype1670, $_size1667); + for ($_i1671 = 0; $_i1671 < $_size1667; ++$_i1671) { + $elem1672 = null; + $elem1672 = new \metastore\Partition(); + $xfer += $elem1672->read($input); + $this->success []= $elem1672; } $xfer += $input->readListEnd(); } else { @@ -145,8 +145,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1666) { - $xfer += $iter1666->write($output); + foreach ($this->success as $iter1673) { + $xfer += $iter1673->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_ps_with_auth_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_ps_with_auth_args.php index fe977507604b..3b63b0f437c0 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_ps_with_auth_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_ps_with_auth_args.php @@ -146,13 +146,13 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1667 = 0; - $_etype1670 = 0; - $xfer += $input->readListBegin($_etype1670, $_size1667); - for ($_i1671 = 0; $_i1671 < $_size1667; ++$_i1671) { - $elem1672 = null; - $xfer += $input->readString($elem1672); - $this->part_vals []= $elem1672; + $_size1674 = 0; + $_etype1677 = 0; + $xfer += $input->readListBegin($_etype1677, $_size1674); + for ($_i1678 = 0; $_i1678 < $_size1674; ++$_i1678) { + $elem1679 = null; + $xfer += $input->readString($elem1679); + $this->part_vals []= $elem1679; } $xfer += $input->readListEnd(); } else { @@ -176,13 +176,13 @@ public function read($input) case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1673 = 0; - $_etype1676 = 0; - $xfer += $input->readListBegin($_etype1676, $_size1673); - for ($_i1677 = 0; $_i1677 < $_size1673; ++$_i1677) { - $elem1678 = null; - $xfer += $input->readString($elem1678); - $this->group_names []= $elem1678; + $_size1680 = 0; + $_etype1683 = 0; + $xfer += $input->readListBegin($_etype1683, $_size1680); + for ($_i1684 = 0; $_i1684 < $_size1680; ++$_i1684) { + $elem1685 = null; + $xfer += 
$input->readString($elem1685); + $this->group_names []= $elem1685; } $xfer += $input->readListEnd(); } else { @@ -219,8 +219,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('part_vals', TType::LST, 3); $output->writeListBegin(TType::STRING, count($this->part_vals)); - foreach ($this->part_vals as $iter1679) { - $xfer += $output->writeString($iter1679); + foreach ($this->part_vals as $iter1686) { + $xfer += $output->writeString($iter1686); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); @@ -241,8 +241,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('group_names', TType::LST, 6); $output->writeListBegin(TType::STRING, count($this->group_names)); - foreach ($this->group_names as $iter1680) { - $xfer += $output->writeString($iter1680); + foreach ($this->group_names as $iter1687) { + $xfer += $output->writeString($iter1687); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_ps_with_auth_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_ps_with_auth_result.php index cd3ea604870b..f19d8923c62a 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_ps_with_auth_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_ps_with_auth_result.php @@ -95,14 +95,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1681 = 0; - $_etype1684 = 0; - $xfer += $input->readListBegin($_etype1684, $_size1681); - for ($_i1685 = 0; $_i1685 < $_size1681; ++$_i1685) { - $elem1686 = null; - $elem1686 = new \metastore\Partition(); - $xfer += $elem1686->read($input); - $this->success []= $elem1686; + $_size1688 = 0; + $_etype1691 = 0; + $xfer += $input->readListBegin($_etype1691, $_size1688); + for ($_i1692 = 0; $_i1692 < $_size1688; ++$_i1692) { + $elem1693 = null; + $elem1693 = new \metastore\Partition(); + $xfer += $elem1693->read($input); + $this->success []= $elem1693; } $xfer += $input->readListEnd(); } else { @@ -145,8 +145,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1687) { - $xfer += $iter1687->write($output); + foreach ($this->success as $iter1694) { + $xfer += $iter1694->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_pspec_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_pspec_result.php index 7e25f59da25c..0ab3c0c1e525 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_pspec_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_pspec_result.php @@ -95,14 +95,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1639 = 0; - $_etype1642 = 0; - $xfer += $input->readListBegin($_etype1642, $_size1639); - for ($_i1643 = 0; $_i1643 < $_size1639; ++$_i1643) { - $elem1644 = null; - $elem1644 = new \metastore\PartitionSpec(); - $xfer 
+= $elem1644->read($input); - $this->success []= $elem1644; + $_size1646 = 0; + $_etype1649 = 0; + $xfer += $input->readListBegin($_etype1649, $_size1646); + for ($_i1650 = 0; $_i1650 < $_size1646; ++$_i1650) { + $elem1651 = null; + $elem1651 = new \metastore\PartitionSpec(); + $xfer += $elem1651->read($input); + $this->success []= $elem1651; } $xfer += $input->readListEnd(); } else { @@ -145,8 +145,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1645) { - $xfer += $iter1645->write($output); + foreach ($this->success as $iter1652) { + $xfer += $iter1652->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_result.php index 42c9a65c494e..6566002f3a3f 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_result.php @@ -95,14 +95,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1618 = 0; - $_etype1621 = 0; - $xfer += $input->readListBegin($_etype1621, $_size1618); - for ($_i1622 = 0; $_i1622 < $_size1618; ++$_i1622) { - $elem1623 = null; - $elem1623 = new \metastore\Partition(); - $xfer += $elem1623->read($input); - $this->success []= $elem1623; + $_size1625 = 0; + $_etype1628 = 0; + $xfer += $input->readListBegin($_etype1628, $_size1625); + for ($_i1629 = 0; $_i1629 < $_size1625; ++$_i1629) { + $elem1630 = null; + $elem1630 = new \metastore\Partition(); + $xfer += $elem1630->read($input); + $this->success []= $elem1630; } $xfer += $input->readListEnd(); } else { @@ -145,8 +145,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1624) { - $xfer += $iter1624->write($output); + foreach ($this->success as $iter1631) { + $xfer += $iter1631->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_with_auth_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_with_auth_args.php index b54451581882..ebd29d773a67 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_with_auth_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_with_auth_args.php @@ -144,13 +144,13 @@ public function read($input) case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1625 = 0; - $_etype1628 = 0; - $xfer += $input->readListBegin($_etype1628, $_size1625); - for ($_i1629 = 0; $_i1629 < $_size1625; ++$_i1629) { - $elem1630 = null; - $xfer += $input->readString($elem1630); - $this->group_names []= $elem1630; + $_size1632 = 0; + $_etype1635 = 0; + $xfer += $input->readListBegin($_etype1635, $_size1632); + for ($_i1636 = 0; $_i1636 < $_size1632; ++$_i1636) { + $elem1637 = null; + $xfer += 
$input->readString($elem1637); + $this->group_names []= $elem1637; } $xfer += $input->readListEnd(); } else { @@ -197,8 +197,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('group_names', TType::LST, 5); $output->writeListBegin(TType::STRING, count($this->group_names)); - foreach ($this->group_names as $iter1631) { - $xfer += $output->writeString($iter1631); + foreach ($this->group_names as $iter1638) { + $xfer += $output->writeString($iter1638); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_with_auth_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_with_auth_result.php index 9f06b78f393e..27169e09b5f8 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_with_auth_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_partitions_with_auth_result.php @@ -95,14 +95,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1632 = 0; - $_etype1635 = 0; - $xfer += $input->readListBegin($_etype1635, $_size1632); - for ($_i1636 = 0; $_i1636 < $_size1632; ++$_i1636) { - $elem1637 = null; - $elem1637 = new \metastore\Partition(); - $xfer += $elem1637->read($input); - $this->success []= $elem1637; + $_size1639 = 0; + $_etype1642 = 0; + $xfer += $input->readListBegin($_etype1642, $_size1639); + for ($_i1643 = 0; $_i1643 < $_size1639; ++$_i1643) { + $elem1644 = null; + $elem1644 = new \metastore\Partition(); + $xfer += $elem1644->read($input); + $this->success []= $elem1644; } $xfer += $input->readListEnd(); } else { @@ -145,8 +145,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1638) { - $xfer += $iter1638->write($output); + foreach ($this->success as $iter1645) { + $xfer += $iter1645->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_privilege_set_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_privilege_set_args.php index ab1303b1f8ca..3e701a3cd77e 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_privilege_set_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_privilege_set_args.php @@ -108,13 +108,13 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1827 = 0; - $_etype1830 = 0; - $xfer += $input->readListBegin($_etype1830, $_size1827); - for ($_i1831 = 0; $_i1831 < $_size1827; ++$_i1831) { - $elem1832 = null; - $xfer += $input->readString($elem1832); - $this->group_names []= $elem1832; + $_size1834 = 0; + $_etype1837 = 0; + $xfer += $input->readListBegin($_etype1837, $_size1834); + for ($_i1838 = 0; $_i1838 < $_size1834; ++$_i1838) { + $elem1839 = null; + $xfer += $input->readString($elem1839); + $this->group_names []= $elem1839; } $xfer += $input->readListEnd(); } else { @@ -154,8 +154,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('group_names', TType::LST, 3); 
$output->writeListBegin(TType::STRING, count($this->group_names)); - foreach ($this->group_names as $iter1833) { - $xfer += $output->writeString($iter1833); + foreach ($this->group_names as $iter1840) { + $xfer += $output->writeString($iter1840); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_role_names_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_role_names_result.php index 7353970ddf4c..ee595f86e948 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_role_names_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_role_names_result.php @@ -81,13 +81,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1813 = 0; - $_etype1816 = 0; - $xfer += $input->readListBegin($_etype1816, $_size1813); - for ($_i1817 = 0; $_i1817 < $_size1813; ++$_i1817) { - $elem1818 = null; - $xfer += $input->readString($elem1818); - $this->success []= $elem1818; + $_size1820 = 0; + $_etype1823 = 0; + $xfer += $input->readListBegin($_etype1823, $_size1820); + for ($_i1824 = 0; $_i1824 < $_size1820; ++$_i1824) { + $elem1825 = null; + $xfer += $input->readString($elem1825); + $this->success []= $elem1825; } $xfer += $input->readListEnd(); } else { @@ -122,8 +122,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1819) { - $xfer += $output->writeString($iter1819); + foreach ($this->success as $iter1826) { + $xfer += $output->writeString($iter1826); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_runtime_stats_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_runtime_stats_result.php index 051d4230459a..24d692ee6fa1 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_runtime_stats_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_runtime_stats_result.php @@ -82,14 +82,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1892 = 0; - $_etype1895 = 0; - $xfer += $input->readListBegin($_etype1895, $_size1892); - for ($_i1896 = 0; $_i1896 < $_size1892; ++$_i1896) { - $elem1897 = null; - $elem1897 = new \metastore\RuntimeStat(); - $xfer += $elem1897->read($input); - $this->success []= $elem1897; + $_size1899 = 0; + $_etype1902 = 0; + $xfer += $input->readListBegin($_etype1902, $_size1899); + for ($_i1903 = 0; $_i1903 < $_size1899; ++$_i1903) { + $elem1904 = null; + $elem1904 = new \metastore\RuntimeStat(); + $xfer += $elem1904->read($input); + $this->success []= $elem1904; } $xfer += $input->readListEnd(); } else { @@ -124,8 +124,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1898) { - $xfer += $iter1898->write($output); + foreach ($this->success as $iter1905) { + $xfer += $iter1905->write($output); } $output->writeListEnd(); $xfer += 
$output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_schema_all_versions_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_schema_all_versions_result.php index b3afca738165..b532ad412a52 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_schema_all_versions_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_schema_all_versions_result.php @@ -95,14 +95,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1885 = 0; - $_etype1888 = 0; - $xfer += $input->readListBegin($_etype1888, $_size1885); - for ($_i1889 = 0; $_i1889 < $_size1885; ++$_i1889) { - $elem1890 = null; - $elem1890 = new \metastore\SchemaVersion(); - $xfer += $elem1890->read($input); - $this->success []= $elem1890; + $_size1892 = 0; + $_etype1895 = 0; + $xfer += $input->readListBegin($_etype1895, $_size1892); + for ($_i1896 = 0; $_i1896 < $_size1892; ++$_i1896) { + $elem1897 = null; + $elem1897 = new \metastore\SchemaVersion(); + $xfer += $elem1897->read($input); + $this->success []= $elem1897; } $xfer += $input->readListEnd(); } else { @@ -145,8 +145,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1891) { - $xfer += $iter1891->write($output); + foreach ($this->success as $iter1898) { + $xfer += $iter1898->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_schema_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_schema_result.php index f6ae9424cb74..70ed35fa2bc9 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_schema_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_schema_result.php @@ -108,14 +108,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1390 = 0; - $_etype1393 = 0; - $xfer += $input->readListBegin($_etype1393, $_size1390); - for ($_i1394 = 0; $_i1394 < $_size1390; ++$_i1394) { - $elem1395 = null; - $elem1395 = new \metastore\FieldSchema(); - $xfer += $elem1395->read($input); - $this->success []= $elem1395; + $_size1397 = 0; + $_etype1400 = 0; + $xfer += $input->readListBegin($_etype1400, $_size1397); + for ($_i1401 = 0; $_i1401 < $_size1397; ++$_i1401) { + $elem1402 = null; + $elem1402 = new \metastore\FieldSchema(); + $xfer += $elem1402->read($input); + $this->success []= $elem1402; } $xfer += $input->readListEnd(); } else { @@ -166,8 +166,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1396) { - $xfer += $iter1396->write($output); + foreach ($this->success as $iter1403) { + $xfer += $iter1403->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_schema_with_environment_context_result.php 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_schema_with_environment_context_result.php index adc09cc0e92b..234e74394ebe 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_schema_with_environment_context_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_schema_with_environment_context_result.php @@ -108,14 +108,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1397 = 0; - $_etype1400 = 0; - $xfer += $input->readListBegin($_etype1400, $_size1397); - for ($_i1401 = 0; $_i1401 < $_size1397; ++$_i1401) { - $elem1402 = null; - $elem1402 = new \metastore\FieldSchema(); - $xfer += $elem1402->read($input); - $this->success []= $elem1402; + $_size1404 = 0; + $_etype1407 = 0; + $xfer += $input->readListBegin($_etype1407, $_size1404); + for ($_i1408 = 0; $_i1408 < $_size1404; ++$_i1408) { + $elem1409 = null; + $elem1409 = new \metastore\FieldSchema(); + $xfer += $elem1409->read($input); + $this->success []= $elem1409; } $xfer += $input->readListEnd(); } else { @@ -166,8 +166,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1403) { - $xfer += $iter1403->write($output); + foreach ($this->success as $iter1410) { + $xfer += $iter1410->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_meta_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_meta_args.php index eb41013a39aa..adf3a47ff11a 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_meta_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_meta_args.php @@ -106,13 +106,13 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->tbl_types = array(); - $_size1481 = 0; - $_etype1484 = 0; - $xfer += $input->readListBegin($_etype1484, $_size1481); - for ($_i1485 = 0; $_i1485 < $_size1481; ++$_i1485) { - $elem1486 = null; - $xfer += $input->readString($elem1486); - $this->tbl_types []= $elem1486; + $_size1488 = 0; + $_etype1491 = 0; + $xfer += $input->readListBegin($_etype1491, $_size1488); + for ($_i1492 = 0; $_i1492 < $_size1488; ++$_i1492) { + $elem1493 = null; + $xfer += $input->readString($elem1493); + $this->tbl_types []= $elem1493; } $xfer += $input->readListEnd(); } else { @@ -149,8 +149,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('tbl_types', TType::LST, 3); $output->writeListBegin(TType::STRING, count($this->tbl_types)); - foreach ($this->tbl_types as $iter1487) { - $xfer += $output->writeString($iter1487); + foreach ($this->tbl_types as $iter1494) { + $xfer += $output->writeString($iter1494); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_meta_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_meta_result.php index fa85fda009df..a991856ebb7a 100644 --- 
a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_meta_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_meta_result.php @@ -82,14 +82,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1488 = 0; - $_etype1491 = 0; - $xfer += $input->readListBegin($_etype1491, $_size1488); - for ($_i1492 = 0; $_i1492 < $_size1488; ++$_i1492) { - $elem1493 = null; - $elem1493 = new \metastore\TableMeta(); - $xfer += $elem1493->read($input); - $this->success []= $elem1493; + $_size1495 = 0; + $_etype1498 = 0; + $xfer += $input->readListBegin($_etype1498, $_size1495); + for ($_i1499 = 0; $_i1499 < $_size1495; ++$_i1499) { + $elem1500 = null; + $elem1500 = new \metastore\TableMeta(); + $xfer += $elem1500->read($input); + $this->success []= $elem1500; } $xfer += $input->readListEnd(); } else { @@ -124,8 +124,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1494) { - $xfer += $iter1494->write($output); + foreach ($this->success as $iter1501) { + $xfer += $iter1501->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_names_by_filter_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_names_by_filter_result.php index e125030e775a..f0490e007289 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_names_by_filter_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_names_by_filter_result.php @@ -107,13 +107,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1523 = 0; - $_etype1526 = 0; - $xfer += $input->readListBegin($_etype1526, $_size1523); - for ($_i1527 = 0; $_i1527 < $_size1523; ++$_i1527) { - $elem1528 = null; - $xfer += $input->readString($elem1528); - $this->success []= $elem1528; + $_size1530 = 0; + $_etype1533 = 0; + $xfer += $input->readListBegin($_etype1533, $_size1530); + for ($_i1534 = 0; $_i1534 < $_size1530; ++$_i1534) { + $elem1535 = null; + $xfer += $input->readString($elem1535); + $this->success []= $elem1535; } $xfer += $input->readListEnd(); } else { @@ -164,8 +164,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1529) { - $xfer += $output->writeString($iter1529); + foreach ($this->success as $iter1536) { + $xfer += $output->writeString($iter1536); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_objects_by_name_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_objects_by_name_args.php index 8bdb9756a4b3..c8b8d72e0229 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_objects_by_name_args.php +++ 
b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_objects_by_name_args.php @@ -87,13 +87,13 @@ public function read($input) case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size1502 = 0; - $_etype1505 = 0; - $xfer += $input->readListBegin($_etype1505, $_size1502); - for ($_i1506 = 0; $_i1506 < $_size1502; ++$_i1506) { - $elem1507 = null; - $xfer += $input->readString($elem1507); - $this->tbl_names []= $elem1507; + $_size1509 = 0; + $_etype1512 = 0; + $xfer += $input->readListBegin($_etype1512, $_size1509); + for ($_i1513 = 0; $_i1513 < $_size1509; ++$_i1513) { + $elem1514 = null; + $xfer += $input->readString($elem1514); + $this->tbl_names []= $elem1514; } $xfer += $input->readListEnd(); } else { @@ -125,8 +125,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('tbl_names', TType::LST, 2); $output->writeListBegin(TType::STRING, count($this->tbl_names)); - foreach ($this->tbl_names as $iter1508) { - $xfer += $output->writeString($iter1508); + foreach ($this->tbl_names as $iter1515) { + $xfer += $output->writeString($iter1515); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_objects_by_name_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_objects_by_name_result.php index fccbc17d1db8..b1da98ed7ad8 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_objects_by_name_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_table_objects_by_name_result.php @@ -69,14 +69,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1509 = 0; - $_etype1512 = 0; - $xfer += $input->readListBegin($_etype1512, $_size1509); - for ($_i1513 = 0; $_i1513 < $_size1509; ++$_i1513) { - $elem1514 = null; - $elem1514 = new \metastore\Table(); - $xfer += $elem1514->read($input); - $this->success []= $elem1514; + $_size1516 = 0; + $_etype1519 = 0; + $xfer += $input->readListBegin($_etype1519, $_size1516); + for ($_i1520 = 0; $_i1520 < $_size1516; ++$_i1520) { + $elem1521 = null; + $elem1521 = new \metastore\Table(); + $xfer += $elem1521->read($input); + $this->success []= $elem1521; } $xfer += $input->readListEnd(); } else { @@ -103,8 +103,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1515) { - $xfer += $iter1515->write($output); + foreach ($this->success as $iter1522) { + $xfer += $iter1522->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_tables_by_type_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_tables_by_type_result.php index f5819a715114..3c10e66ead77 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_tables_by_type_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_tables_by_type_result.php @@ -81,13 +81,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - 
$_size1460 = 0; - $_etype1463 = 0; - $xfer += $input->readListBegin($_etype1463, $_size1460); - for ($_i1464 = 0; $_i1464 < $_size1460; ++$_i1464) { - $elem1465 = null; - $xfer += $input->readString($elem1465); - $this->success []= $elem1465; + $_size1467 = 0; + $_etype1470 = 0; + $xfer += $input->readListBegin($_etype1470, $_size1467); + for ($_i1471 = 0; $_i1471 < $_size1467; ++$_i1471) { + $elem1472 = null; + $xfer += $input->readString($elem1472); + $this->success []= $elem1472; } $xfer += $input->readListEnd(); } else { @@ -122,8 +122,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1466) { - $xfer += $output->writeString($iter1466); + foreach ($this->success as $iter1473) { + $xfer += $output->writeString($iter1473); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_tables_ext_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_tables_ext_result.php index 6d23127f2712..6ca1744e9029 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_tables_ext_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_tables_ext_result.php @@ -82,14 +82,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1516 = 0; - $_etype1519 = 0; - $xfer += $input->readListBegin($_etype1519, $_size1516); - for ($_i1520 = 0; $_i1520 < $_size1516; ++$_i1520) { - $elem1521 = null; - $elem1521 = new \metastore\ExtendedTableInfo(); - $xfer += $elem1521->read($input); - $this->success []= $elem1521; + $_size1523 = 0; + $_etype1526 = 0; + $xfer += $input->readListBegin($_etype1526, $_size1523); + for ($_i1527 = 0; $_i1527 < $_size1523; ++$_i1527) { + $elem1528 = null; + $elem1528 = new \metastore\ExtendedTableInfo(); + $xfer += $elem1528->read($input); + $this->success []= $elem1528; } $xfer += $input->readListEnd(); } else { @@ -124,8 +124,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1522) { - $xfer += $iter1522->write($output); + foreach ($this->success as $iter1529) { + $xfer += $iter1529->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_tables_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_tables_result.php index 1e24c0bae93a..7124e0f5b987 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_tables_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_tables_result.php @@ -81,13 +81,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1453 = 0; - $_etype1456 = 0; - $xfer += $input->readListBegin($_etype1456, $_size1453); - for ($_i1457 = 0; $_i1457 < $_size1453; ++$_i1457) { - $elem1458 = null; - $xfer += $input->readString($elem1458); - $this->success []= $elem1458; + $_size1460 = 0; + $_etype1463 = 0; + $xfer += 
$input->readListBegin($_etype1463, $_size1460); + for ($_i1464 = 0; $_i1464 < $_size1460; ++$_i1464) { + $elem1465 = null; + $xfer += $input->readString($elem1465); + $this->success []= $elem1465; } $xfer += $input->readListEnd(); } else { @@ -122,8 +122,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1459) { - $xfer += $output->writeString($iter1459); + foreach ($this->success as $iter1466) { + $xfer += $output->writeString($iter1466); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_type_all_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_type_all_result.php index 45daa41ab670..4ba3c2b1fff1 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_type_all_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_get_type_all_result.php @@ -86,17 +86,17 @@ public function read($input) case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1367 = 0; - $_ktype1368 = 0; - $_vtype1369 = 0; - $xfer += $input->readMapBegin($_ktype1368, $_vtype1369, $_size1367); - for ($_i1371 = 0; $_i1371 < $_size1367; ++$_i1371) { - $key1372 = ''; - $val1373 = new \metastore\Type(); - $xfer += $input->readString($key1372); - $val1373 = new \metastore\Type(); - $xfer += $val1373->read($input); - $this->success[$key1372] = $val1373; + $_size1374 = 0; + $_ktype1375 = 0; + $_vtype1376 = 0; + $xfer += $input->readMapBegin($_ktype1375, $_vtype1376, $_size1374); + for ($_i1378 = 0; $_i1378 < $_size1374; ++$_i1378) { + $key1379 = ''; + $val1380 = new \metastore\Type(); + $xfer += $input->readString($key1379); + $val1380 = new \metastore\Type(); + $xfer += $val1380->read($input); + $this->success[$key1379] = $val1380; } $xfer += $input->readMapEnd(); } else { @@ -131,9 +131,9 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::MAP, 0); $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); - foreach ($this->success as $kiter1374 => $viter1375) { - $xfer += $output->writeString($kiter1374); - $xfer += $viter1375->write($output); + foreach ($this->success as $kiter1381 => $viter1382) { + $xfer += $output->writeString($kiter1381); + $xfer += $viter1382->write($output); } $output->writeMapEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_isPartitionMarkedForEvent_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_isPartitionMarkedForEvent_args.php index 38c1c98781de..e0bc6a0013ea 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_isPartitionMarkedForEvent_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_isPartitionMarkedForEvent_args.php @@ -123,16 +123,16 @@ public function read($input) case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1797 = 0; - $_ktype1798 = 0; - $_vtype1799 = 0; - $xfer += $input->readMapBegin($_ktype1798, $_vtype1799, $_size1797); - for ($_i1801 = 0; $_i1801 < $_size1797; ++$_i1801) { - $key1802 = ''; - $val1803 = ''; - $xfer += 
$input->readString($key1802); - $xfer += $input->readString($val1803); - $this->part_vals[$key1802] = $val1803; + $_size1804 = 0; + $_ktype1805 = 0; + $_vtype1806 = 0; + $xfer += $input->readMapBegin($_ktype1805, $_vtype1806, $_size1804); + for ($_i1808 = 0; $_i1808 < $_size1804; ++$_i1808) { + $key1809 = ''; + $val1810 = ''; + $xfer += $input->readString($key1809); + $xfer += $input->readString($val1810); + $this->part_vals[$key1809] = $val1810; } $xfer += $input->readMapEnd(); } else { @@ -176,9 +176,9 @@ public function write($output) } $xfer += $output->writeFieldBegin('part_vals', TType::MAP, 3); $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); - foreach ($this->part_vals as $kiter1804 => $viter1805) { - $xfer += $output->writeString($kiter1804); - $xfer += $output->writeString($viter1805); + foreach ($this->part_vals as $kiter1811 => $viter1812) { + $xfer += $output->writeString($kiter1811); + $xfer += $output->writeString($viter1812); } $output->writeMapEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_list_privileges_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_list_privileges_result.php index 0c04dbcb0aea..74058fdfaf54 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_list_privileges_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_list_privileges_result.php @@ -82,14 +82,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1834 = 0; - $_etype1837 = 0; - $xfer += $input->readListBegin($_etype1837, $_size1834); - for ($_i1838 = 0; $_i1838 < $_size1834; ++$_i1838) { - $elem1839 = null; - $elem1839 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem1839->read($input); - $this->success []= $elem1839; + $_size1841 = 0; + $_etype1844 = 0; + $xfer += $input->readListBegin($_etype1844, $_size1841); + for ($_i1845 = 0; $_i1845 < $_size1841; ++$_i1845) { + $elem1846 = null; + $elem1846 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem1846->read($input); + $this->success []= $elem1846; } $xfer += $input->readListEnd(); } else { @@ -124,8 +124,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1840) { - $xfer += $iter1840->write($output); + foreach ($this->success as $iter1847) { + $xfer += $iter1847->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_list_roles_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_list_roles_result.php index 860a92a023cc..26d19cd9fc87 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_list_roles_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_list_roles_result.php @@ -82,14 +82,14 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1820 = 0; - $_etype1823 = 0; - $xfer += $input->readListBegin($_etype1823, $_size1820); - for ($_i1824 = 0; $_i1824 < $_size1820; ++$_i1824) { - $elem1825 = null; - $elem1825 = new 
\metastore\Role(); - $xfer += $elem1825->read($input); - $this->success []= $elem1825; + $_size1827 = 0; + $_etype1830 = 0; + $xfer += $input->readListBegin($_etype1830, $_size1827); + for ($_i1831 = 0; $_i1831 < $_size1827; ++$_i1831) { + $elem1832 = null; + $elem1832 = new \metastore\Role(); + $xfer += $elem1832->read($input); + $this->success []= $elem1832; } $xfer += $input->readListEnd(); } else { @@ -124,8 +124,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRUCT, count($this->success)); - foreach ($this->success as $iter1826) { - $xfer += $iter1826->write($output); + foreach ($this->success as $iter1833) { + $xfer += $iter1833->write($output); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_markPartitionForEvent_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_markPartitionForEvent_args.php index a335b9c9d02f..4391b3a0f4ba 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_markPartitionForEvent_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_markPartitionForEvent_args.php @@ -123,16 +123,16 @@ public function read($input) case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size1788 = 0; - $_ktype1789 = 0; - $_vtype1790 = 0; - $xfer += $input->readMapBegin($_ktype1789, $_vtype1790, $_size1788); - for ($_i1792 = 0; $_i1792 < $_size1788; ++$_i1792) { - $key1793 = ''; - $val1794 = ''; - $xfer += $input->readString($key1793); - $xfer += $input->readString($val1794); - $this->part_vals[$key1793] = $val1794; + $_size1795 = 0; + $_ktype1796 = 0; + $_vtype1797 = 0; + $xfer += $input->readMapBegin($_ktype1796, $_vtype1797, $_size1795); + for ($_i1799 = 0; $_i1799 < $_size1795; ++$_i1799) { + $key1800 = ''; + $val1801 = ''; + $xfer += $input->readString($key1800); + $xfer += $input->readString($val1801); + $this->part_vals[$key1800] = $val1801; } $xfer += $input->readMapEnd(); } else { @@ -176,9 +176,9 @@ public function write($output) } $xfer += $output->writeFieldBegin('part_vals', TType::MAP, 3); $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); - foreach ($this->part_vals as $kiter1795 => $viter1796) { - $xfer += $output->writeString($kiter1795); - $xfer += $output->writeString($viter1796); + foreach ($this->part_vals as $kiter1802 => $viter1803) { + $xfer += $output->writeString($kiter1802); + $xfer += $output->writeString($viter1803); } $output->writeMapEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_partition_name_has_valid_characters_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_partition_name_has_valid_characters_args.php index 32a792c46d56..130c0fe3cd77 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_partition_name_has_valid_characters_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_partition_name_has_valid_characters_args.php @@ -80,13 +80,13 @@ public function read($input) case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1765 = 0; - $_etype1768 = 0; - $xfer += 
$input->readListBegin($_etype1768, $_size1765); - for ($_i1769 = 0; $_i1769 < $_size1765; ++$_i1769) { - $elem1770 = null; - $xfer += $input->readString($elem1770); - $this->part_vals []= $elem1770; + $_size1772 = 0; + $_etype1775 = 0; + $xfer += $input->readListBegin($_etype1775, $_size1772); + for ($_i1776 = 0; $_i1776 < $_size1772; ++$_i1776) { + $elem1777 = null; + $xfer += $input->readString($elem1777); + $this->part_vals []= $elem1777; } $xfer += $input->readListEnd(); } else { @@ -120,8 +120,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('part_vals', TType::LST, 1); $output->writeListBegin(TType::STRING, count($this->part_vals)); - foreach ($this->part_vals as $iter1771) { - $xfer += $output->writeString($iter1771); + foreach ($this->part_vals as $iter1778) { + $xfer += $output->writeString($iter1778); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_partition_name_to_spec_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_partition_name_to_spec_result.php index 18263b34051d..db788f95e942 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_partition_name_to_spec_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_partition_name_to_spec_result.php @@ -85,16 +85,16 @@ public function read($input) case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size1779 = 0; - $_ktype1780 = 0; - $_vtype1781 = 0; - $xfer += $input->readMapBegin($_ktype1780, $_vtype1781, $_size1779); - for ($_i1783 = 0; $_i1783 < $_size1779; ++$_i1783) { - $key1784 = ''; - $val1785 = ''; - $xfer += $input->readString($key1784); - $xfer += $input->readString($val1785); - $this->success[$key1784] = $val1785; + $_size1786 = 0; + $_ktype1787 = 0; + $_vtype1788 = 0; + $xfer += $input->readMapBegin($_ktype1787, $_vtype1788, $_size1786); + for ($_i1790 = 0; $_i1790 < $_size1786; ++$_i1790) { + $key1791 = ''; + $val1792 = ''; + $xfer += $input->readString($key1791); + $xfer += $input->readString($val1792); + $this->success[$key1791] = $val1792; } $xfer += $input->readMapEnd(); } else { @@ -129,9 +129,9 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::MAP, 0); $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); - foreach ($this->success as $kiter1786 => $viter1787) { - $xfer += $output->writeString($kiter1786); - $xfer += $output->writeString($viter1787); + foreach ($this->success as $kiter1793 => $viter1794) { + $xfer += $output->writeString($kiter1793); + $xfer += $output->writeString($viter1794); } $output->writeMapEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_partition_name_to_vals_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_partition_name_to_vals_result.php index bd6f8e46f7d0..eae456401469 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_partition_name_to_vals_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_partition_name_to_vals_result.php @@ -81,13 +81,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1772 = 0; - 
$_etype1775 = 0; - $xfer += $input->readListBegin($_etype1775, $_size1772); - for ($_i1776 = 0; $_i1776 < $_size1772; ++$_i1776) { - $elem1777 = null; - $xfer += $input->readString($elem1777); - $this->success []= $elem1777; + $_size1779 = 0; + $_etype1782 = 0; + $xfer += $input->readListBegin($_etype1782, $_size1779); + for ($_i1783 = 0; $_i1783 < $_size1779; ++$_i1783) { + $elem1784 = null; + $xfer += $input->readString($elem1784); + $this->success []= $elem1784; } $xfer += $input->readListEnd(); } else { @@ -122,8 +122,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1778) { - $xfer += $output->writeString($iter1778); + foreach ($this->success as $iter1785) { + $xfer += $output->writeString($iter1785); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_rename_partition_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_rename_partition_args.php index a0ebcef960cd..dfb455df16fa 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_rename_partition_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_rename_partition_args.php @@ -119,13 +119,13 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size1758 = 0; - $_etype1761 = 0; - $xfer += $input->readListBegin($_etype1761, $_size1758); - for ($_i1762 = 0; $_i1762 < $_size1758; ++$_i1762) { - $elem1763 = null; - $xfer += $input->readString($elem1763); - $this->part_vals []= $elem1763; + $_size1765 = 0; + $_etype1768 = 0; + $xfer += $input->readListBegin($_etype1768, $_size1765); + for ($_i1769 = 0; $_i1769 < $_size1765; ++$_i1769) { + $elem1770 = null; + $xfer += $input->readString($elem1770); + $this->part_vals []= $elem1770; } $xfer += $input->readListEnd(); } else { @@ -170,8 +170,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('part_vals', TType::LST, 3); $output->writeListBegin(TType::STRING, count($this->part_vals)); - foreach ($this->part_vals as $iter1764) { - $xfer += $output->writeString($iter1764); + foreach ($this->part_vals as $iter1771) { + $xfer += $output->writeString($iter1771); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_set_ugi_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_set_ugi_args.php index 6ce4ee0b957d..012c00c0b81e 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_set_ugi_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_set_ugi_args.php @@ -87,13 +87,13 @@ public function read($input) case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size1841 = 0; - $_etype1844 = 0; - $xfer += $input->readListBegin($_etype1844, $_size1841); - for ($_i1845 = 0; $_i1845 < $_size1841; ++$_i1845) { - $elem1846 = null; - $xfer += $input->readString($elem1846); - $this->group_names []= $elem1846; + $_size1848 = 0; + $_etype1851 = 0; + $xfer += $input->readListBegin($_etype1851, $_size1848); + for ($_i1852 = 0; $_i1852 < $_size1848; ++$_i1852) { + 
$elem1853 = null; + $xfer += $input->readString($elem1853); + $this->group_names []= $elem1853; } $xfer += $input->readListEnd(); } else { @@ -125,8 +125,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('group_names', TType::LST, 2); $output->writeListBegin(TType::STRING, count($this->group_names)); - foreach ($this->group_names as $iter1847) { - $xfer += $output->writeString($iter1847); + foreach ($this->group_names as $iter1854) { + $xfer += $output->writeString($iter1854); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_set_ugi_result.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_set_ugi_result.php index 7436e4e22bb7..ec466c2cfba7 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_set_ugi_result.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_set_ugi_result.php @@ -81,13 +81,13 @@ public function read($input) case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size1848 = 0; - $_etype1851 = 0; - $xfer += $input->readListBegin($_etype1851, $_size1848); - for ($_i1852 = 0; $_i1852 < $_size1848; ++$_i1852) { - $elem1853 = null; - $xfer += $input->readString($elem1853); - $this->success []= $elem1853; + $_size1855 = 0; + $_etype1858 = 0; + $xfer += $input->readListBegin($_etype1858, $_size1855); + for ($_i1859 = 0; $_i1859 < $_size1855; ++$_i1859) { + $elem1860 = null; + $xfer += $input->readString($elem1860); + $this->success []= $elem1860; } $xfer += $input->readListEnd(); } else { @@ -122,8 +122,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('success', TType::LST, 0); $output->writeListBegin(TType::STRING, count($this->success)); - foreach ($this->success as $iter1854) { - $xfer += $output->writeString($iter1854); + foreach ($this->success as $iter1861) { + $xfer += $output->writeString($iter1861); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_truncate_table_args.php b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_truncate_table_args.php index 9b895253caeb..61d43f864f30 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_truncate_table_args.php +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore_truncate_table_args.php @@ -106,13 +106,13 @@ public function read($input) case 3: if ($ftype == TType::LST) { $this->partNames = array(); - $_size1446 = 0; - $_etype1449 = 0; - $xfer += $input->readListBegin($_etype1449, $_size1446); - for ($_i1450 = 0; $_i1450 < $_size1446; ++$_i1450) { - $elem1451 = null; - $xfer += $input->readString($elem1451); - $this->partNames []= $elem1451; + $_size1453 = 0; + $_etype1456 = 0; + $xfer += $input->readListBegin($_etype1456, $_size1453); + for ($_i1457 = 0; $_i1457 < $_size1453; ++$_i1457) { + $elem1458 = null; + $xfer += $input->readString($elem1458); + $this->partNames []= $elem1458; } $xfer += $input->readListEnd(); } else { @@ -149,8 +149,8 @@ public function write($output) } $xfer += $output->writeFieldBegin('partNames', TType::LST, 3); $output->writeListBegin(TType::STRING, count($this->partNames)); - foreach ($this->partNames as $iter1452) { - $xfer += 
$output->writeString($iter1452); + foreach ($this->partNames as $iter1459) { + $xfer += $output->writeString($iter1459); } $output->writeListEnd(); $xfer += $output->writeFieldEnd(); diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index fd7009c45ca5..80ea3fda4e53 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -22459,10 +22459,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1346, _size1343) = iprot.readListBegin() - for _i1347 in range(_size1343): - _elem1348 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1348) + (_etype1353, _size1350) = iprot.readListBegin() + for _i1354 in range(_size1350): + _elem1355 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1355) iprot.readListEnd() else: iprot.skip(ftype) @@ -22484,8 +22484,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1349 in self.success: - oprot.writeString(iter1349.encode('utf-8') if sys.version_info[0] == 2 else iter1349) + for iter1356 in self.success: + oprot.writeString(iter1356.encode('utf-8') if sys.version_info[0] == 2 else iter1356) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22583,10 +22583,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1353, _size1350) = iprot.readListBegin() - for _i1354 in range(_size1350): - _elem1355 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1355) + (_etype1360, _size1357) = iprot.readListBegin() + for _i1361 in range(_size1357): + _elem1362 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1362) iprot.readListEnd() else: iprot.skip(ftype) @@ -22608,8 +22608,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1356 in self.success: - oprot.writeString(iter1356.encode('utf-8') if sys.version_info[0] == 2 else iter1356) + for iter1363 in self.success: + oprot.writeString(iter1363.encode('utf-8') if sys.version_info[0] == 2 else iter1363) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -23326,10 +23326,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1360, _size1357) = iprot.readListBegin() - for _i1361 in range(_size1357): - _elem1362 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1362) + (_etype1367, _size1364) = iprot.readListBegin() + for _i1368 in range(_size1364): + _elem1369 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1369) iprot.readListEnd() else: iprot.skip(ftype) @@ -23351,8 +23351,8 @@ def write(self, oprot): if self.success is not None: 
oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1363 in self.success: - oprot.writeString(iter1363.encode('utf-8') if sys.version_info[0] == 2 else iter1363) + for iter1370 in self.success: + oprot.writeString(iter1370.encode('utf-8') if sys.version_info[0] == 2 else iter1370) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24073,12 +24073,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1365, _vtype1366, _size1364) = iprot.readMapBegin() - for _i1368 in range(_size1364): - _key1369 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - _val1370 = Type() - _val1370.read(iprot) - self.success[_key1369] = _val1370 + (_ktype1372, _vtype1373, _size1371) = iprot.readMapBegin() + for _i1375 in range(_size1371): + _key1376 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + _val1377 = Type() + _val1377.read(iprot) + self.success[_key1376] = _val1377 iprot.readMapEnd() else: iprot.skip(ftype) @@ -24100,9 +24100,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter1371, viter1372 in self.success.items(): - oprot.writeString(kiter1371.encode('utf-8') if sys.version_info[0] == 2 else kiter1371) - viter1372.write(oprot) + for kiter1378, viter1379 in self.success.items(): + oprot.writeString(kiter1378.encode('utf-8') if sys.version_info[0] == 2 else kiter1378) + viter1379.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -24235,11 +24235,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1376, _size1373) = iprot.readListBegin() - for _i1377 in range(_size1373): - _elem1378 = FieldSchema() - _elem1378.read(iprot) - self.success.append(_elem1378) + (_etype1383, _size1380) = iprot.readListBegin() + for _i1384 in range(_size1380): + _elem1385 = FieldSchema() + _elem1385.read(iprot) + self.success.append(_elem1385) iprot.readListEnd() else: iprot.skip(ftype) @@ -24271,8 +24271,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1379 in self.success: - iter1379.write(oprot) + for iter1386 in self.success: + iter1386.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24428,11 +24428,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1383, _size1380) = iprot.readListBegin() - for _i1384 in range(_size1380): - _elem1385 = FieldSchema() - _elem1385.read(iprot) - self.success.append(_elem1385) + (_etype1390, _size1387) = iprot.readListBegin() + for _i1391 in range(_size1387): + _elem1392 = FieldSchema() + _elem1392.read(iprot) + self.success.append(_elem1392) iprot.readListEnd() else: iprot.skip(ftype) @@ -24464,8 +24464,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1386 in self.success: - iter1386.write(oprot) + for iter1393 in self.success: + iter1393.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24769,11 +24769,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1390, _size1387) = 
iprot.readListBegin() - for _i1391 in range(_size1387): - _elem1392 = FieldSchema() - _elem1392.read(iprot) - self.success.append(_elem1392) + (_etype1397, _size1394) = iprot.readListBegin() + for _i1398 in range(_size1394): + _elem1399 = FieldSchema() + _elem1399.read(iprot) + self.success.append(_elem1399) iprot.readListEnd() else: iprot.skip(ftype) @@ -24805,8 +24805,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1393 in self.success: - iter1393.write(oprot) + for iter1400 in self.success: + iter1400.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24962,11 +24962,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1397, _size1394) = iprot.readListBegin() - for _i1398 in range(_size1394): - _elem1399 = FieldSchema() - _elem1399.read(iprot) - self.success.append(_elem1399) + (_etype1404, _size1401) = iprot.readListBegin() + for _i1405 in range(_size1401): + _elem1406 = FieldSchema() + _elem1406.read(iprot) + self.success.append(_elem1406) iprot.readListEnd() else: iprot.skip(ftype) @@ -24998,8 +24998,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1400 in self.success: - iter1400.write(oprot) + for iter1407 in self.success: + iter1407.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -25576,66 +25576,66 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.primaryKeys = [] - (_etype1404, _size1401) = iprot.readListBegin() - for _i1405 in range(_size1401): - _elem1406 = SQLPrimaryKey() - _elem1406.read(iprot) - self.primaryKeys.append(_elem1406) + (_etype1411, _size1408) = iprot.readListBegin() + for _i1412 in range(_size1408): + _elem1413 = SQLPrimaryKey() + _elem1413.read(iprot) + self.primaryKeys.append(_elem1413) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.foreignKeys = [] - (_etype1410, _size1407) = iprot.readListBegin() - for _i1411 in range(_size1407): - _elem1412 = SQLForeignKey() - _elem1412.read(iprot) - self.foreignKeys.append(_elem1412) + (_etype1417, _size1414) = iprot.readListBegin() + for _i1418 in range(_size1414): + _elem1419 = SQLForeignKey() + _elem1419.read(iprot) + self.foreignKeys.append(_elem1419) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.uniqueConstraints = [] - (_etype1416, _size1413) = iprot.readListBegin() - for _i1417 in range(_size1413): - _elem1418 = SQLUniqueConstraint() - _elem1418.read(iprot) - self.uniqueConstraints.append(_elem1418) + (_etype1423, _size1420) = iprot.readListBegin() + for _i1424 in range(_size1420): + _elem1425 = SQLUniqueConstraint() + _elem1425.read(iprot) + self.uniqueConstraints.append(_elem1425) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.notNullConstraints = [] - (_etype1422, _size1419) = iprot.readListBegin() - for _i1423 in range(_size1419): - _elem1424 = SQLNotNullConstraint() - _elem1424.read(iprot) - self.notNullConstraints.append(_elem1424) + (_etype1429, _size1426) = iprot.readListBegin() + for _i1430 in range(_size1426): + _elem1431 = SQLNotNullConstraint() + _elem1431.read(iprot) + self.notNullConstraints.append(_elem1431) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 6: if ftype == TType.LIST: self.defaultConstraints = 
[] - (_etype1428, _size1425) = iprot.readListBegin() - for _i1429 in range(_size1425): - _elem1430 = SQLDefaultConstraint() - _elem1430.read(iprot) - self.defaultConstraints.append(_elem1430) + (_etype1435, _size1432) = iprot.readListBegin() + for _i1436 in range(_size1432): + _elem1437 = SQLDefaultConstraint() + _elem1437.read(iprot) + self.defaultConstraints.append(_elem1437) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 7: if ftype == TType.LIST: self.checkConstraints = [] - (_etype1434, _size1431) = iprot.readListBegin() - for _i1435 in range(_size1431): - _elem1436 = SQLCheckConstraint() - _elem1436.read(iprot) - self.checkConstraints.append(_elem1436) + (_etype1441, _size1438) = iprot.readListBegin() + for _i1442 in range(_size1438): + _elem1443 = SQLCheckConstraint() + _elem1443.read(iprot) + self.checkConstraints.append(_elem1443) iprot.readListEnd() else: iprot.skip(ftype) @@ -25656,43 +25656,43 @@ def write(self, oprot): if self.primaryKeys is not None: oprot.writeFieldBegin('primaryKeys', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) - for iter1437 in self.primaryKeys: - iter1437.write(oprot) + for iter1444 in self.primaryKeys: + iter1444.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.foreignKeys is not None: oprot.writeFieldBegin('foreignKeys', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) - for iter1438 in self.foreignKeys: - iter1438.write(oprot) + for iter1445 in self.foreignKeys: + iter1445.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.uniqueConstraints is not None: oprot.writeFieldBegin('uniqueConstraints', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.uniqueConstraints)) - for iter1439 in self.uniqueConstraints: - iter1439.write(oprot) + for iter1446 in self.uniqueConstraints: + iter1446.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.notNullConstraints is not None: oprot.writeFieldBegin('notNullConstraints', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.notNullConstraints)) - for iter1440 in self.notNullConstraints: - iter1440.write(oprot) + for iter1447 in self.notNullConstraints: + iter1447.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.defaultConstraints is not None: oprot.writeFieldBegin('defaultConstraints', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.defaultConstraints)) - for iter1441 in self.defaultConstraints: - iter1441.write(oprot) + for iter1448 in self.defaultConstraints: + iter1448.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.checkConstraints is not None: oprot.writeFieldBegin('checkConstraints', TType.LIST, 7) oprot.writeListBegin(TType.STRUCT, len(self.checkConstraints)) - for iter1442 in self.checkConstraints: - iter1442.write(oprot) + for iter1449 in self.checkConstraints: + iter1449.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -27485,10 +27485,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.partNames = [] - (_etype1446, _size1443) = iprot.readListBegin() - for _i1447 in range(_size1443): - _elem1448 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.partNames.append(_elem1448) + (_etype1453, _size1450) = iprot.readListBegin() + for _i1454 in range(_size1450): + _elem1455 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.partNames.append(_elem1455) iprot.readListEnd() else: 
iprot.skip(ftype) @@ -27513,8 +27513,8 @@ def write(self, oprot): if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter1449 in self.partNames: - oprot.writeString(iter1449.encode('utf-8') if sys.version_info[0] == 2 else iter1449) + for iter1456 in self.partNames: + oprot.writeString(iter1456.encode('utf-8') if sys.version_info[0] == 2 else iter1456) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -27840,10 +27840,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1453, _size1450) = iprot.readListBegin() - for _i1454 in range(_size1450): - _elem1455 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1455) + (_etype1460, _size1457) = iprot.readListBegin() + for _i1461 in range(_size1457): + _elem1462 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1462) iprot.readListEnd() else: iprot.skip(ftype) @@ -27865,8 +27865,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1456 in self.success: - oprot.writeString(iter1456.encode('utf-8') if sys.version_info[0] == 2 else iter1456) + for iter1463 in self.success: + oprot.writeString(iter1463.encode('utf-8') if sys.version_info[0] == 2 else iter1463) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28007,10 +28007,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1460, _size1457) = iprot.readListBegin() - for _i1461 in range(_size1457): - _elem1462 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1462) + (_etype1467, _size1464) = iprot.readListBegin() + for _i1468 in range(_size1464): + _elem1469 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1469) iprot.readListEnd() else: iprot.skip(ftype) @@ -28032,8 +28032,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1463 in self.success: - oprot.writeString(iter1463.encode('utf-8') if sys.version_info[0] == 2 else iter1463) + for iter1470 in self.success: + oprot.writeString(iter1470.encode('utf-8') if sys.version_info[0] == 2 else iter1470) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28131,11 +28131,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1467, _size1464) = iprot.readListBegin() - for _i1468 in range(_size1464): - _elem1469 = Table() - _elem1469.read(iprot) - self.success.append(_elem1469) + (_etype1474, _size1471) = iprot.readListBegin() + for _i1475 in range(_size1471): + _elem1476 = Table() + _elem1476.read(iprot) + self.success.append(_elem1476) iprot.readListEnd() else: iprot.skip(ftype) @@ -28157,8 +28157,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1470 in self.success: - iter1470.write(oprot) + for iter1477 in self.success: + iter1477.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not 
None: @@ -28275,10 +28275,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1474, _size1471) = iprot.readListBegin() - for _i1475 in range(_size1471): - _elem1476 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1476) + (_etype1481, _size1478) = iprot.readListBegin() + for _i1482 in range(_size1478): + _elem1483 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1483) iprot.readListEnd() else: iprot.skip(ftype) @@ -28300,8 +28300,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1477 in self.success: - oprot.writeString(iter1477.encode('utf-8') if sys.version_info[0] == 2 else iter1477) + for iter1484 in self.success: + oprot.writeString(iter1484.encode('utf-8') if sys.version_info[0] == 2 else iter1484) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28368,10 +28368,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.tbl_types = [] - (_etype1481, _size1478) = iprot.readListBegin() - for _i1482 in range(_size1478): - _elem1483 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.tbl_types.append(_elem1483) + (_etype1488, _size1485) = iprot.readListBegin() + for _i1489 in range(_size1485): + _elem1490 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.tbl_types.append(_elem1490) iprot.readListEnd() else: iprot.skip(ftype) @@ -28396,8 +28396,8 @@ def write(self, oprot): if self.tbl_types is not None: oprot.writeFieldBegin('tbl_types', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.tbl_types)) - for iter1484 in self.tbl_types: - oprot.writeString(iter1484.encode('utf-8') if sys.version_info[0] == 2 else iter1484) + for iter1491 in self.tbl_types: + oprot.writeString(iter1491.encode('utf-8') if sys.version_info[0] == 2 else iter1491) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -28450,11 +28450,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1488, _size1485) = iprot.readListBegin() - for _i1489 in range(_size1485): - _elem1490 = TableMeta() - _elem1490.read(iprot) - self.success.append(_elem1490) + (_etype1495, _size1492) = iprot.readListBegin() + for _i1496 in range(_size1492): + _elem1497 = TableMeta() + _elem1497.read(iprot) + self.success.append(_elem1497) iprot.readListEnd() else: iprot.skip(ftype) @@ -28476,8 +28476,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1491 in self.success: - iter1491.write(oprot) + for iter1498 in self.success: + iter1498.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28594,10 +28594,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1495, _size1492) = iprot.readListBegin() - for _i1496 in range(_size1492): - _elem1497 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1497) + (_etype1502, _size1499) = iprot.readListBegin() + for _i1503 in range(_size1499): + _elem1504 = iprot.readString().decode('utf-8', 
errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1504) iprot.readListEnd() else: iprot.skip(ftype) @@ -28619,8 +28619,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1498 in self.success: - oprot.writeString(iter1498.encode('utf-8') if sys.version_info[0] == 2 else iter1498) + for iter1505 in self.success: + oprot.writeString(iter1505.encode('utf-8') if sys.version_info[0] == 2 else iter1505) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28840,10 +28840,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype1502, _size1499) = iprot.readListBegin() - for _i1503 in range(_size1499): - _elem1504 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.tbl_names.append(_elem1504) + (_etype1509, _size1506) = iprot.readListBegin() + for _i1510 in range(_size1506): + _elem1511 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.tbl_names.append(_elem1511) iprot.readListEnd() else: iprot.skip(ftype) @@ -28864,8 +28864,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter1505 in self.tbl_names: - oprot.writeString(iter1505.encode('utf-8') if sys.version_info[0] == 2 else iter1505) + for iter1512 in self.tbl_names: + oprot.writeString(iter1512.encode('utf-8') if sys.version_info[0] == 2 else iter1512) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -28915,11 +28915,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1509, _size1506) = iprot.readListBegin() - for _i1510 in range(_size1506): - _elem1511 = Table() - _elem1511.read(iprot) - self.success.append(_elem1511) + (_etype1516, _size1513) = iprot.readListBegin() + for _i1517 in range(_size1513): + _elem1518 = Table() + _elem1518.read(iprot) + self.success.append(_elem1518) iprot.readListEnd() else: iprot.skip(ftype) @@ -28936,8 +28936,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1512 in self.success: - iter1512.write(oprot) + for iter1519 in self.success: + iter1519.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -29050,11 +29050,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1516, _size1513) = iprot.readListBegin() - for _i1517 in range(_size1513): - _elem1518 = ExtendedTableInfo() - _elem1518.read(iprot) - self.success.append(_elem1518) + (_etype1523, _size1520) = iprot.readListBegin() + for _i1524 in range(_size1520): + _elem1525 = ExtendedTableInfo() + _elem1525.read(iprot) + self.success.append(_elem1525) iprot.readListEnd() else: iprot.skip(ftype) @@ -29076,8 +29076,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1519 in self.success: - iter1519.write(oprot) + for iter1526 in self.success: + iter1526.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29890,10 +29890,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - 
(_etype1523, _size1520) = iprot.readListBegin() - for _i1524 in range(_size1520): - _elem1525 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1525) + (_etype1530, _size1527) = iprot.readListBegin() + for _i1531 in range(_size1527): + _elem1532 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1532) iprot.readListEnd() else: iprot.skip(ftype) @@ -29925,8 +29925,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1526 in self.success: - oprot.writeString(iter1526.encode('utf-8') if sys.version_info[0] == 2 else iter1526) + for iter1533 in self.success: + oprot.writeString(iter1533.encode('utf-8') if sys.version_info[0] == 2 else iter1533) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30981,11 +30981,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype1530, _size1527) = iprot.readListBegin() - for _i1531 in range(_size1527): - _elem1532 = Partition() - _elem1532.read(iprot) - self.new_parts.append(_elem1532) + (_etype1537, _size1534) = iprot.readListBegin() + for _i1538 in range(_size1534): + _elem1539 = Partition() + _elem1539.read(iprot) + self.new_parts.append(_elem1539) iprot.readListEnd() else: iprot.skip(ftype) @@ -31002,8 +31002,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1533 in self.new_parts: - iter1533.write(oprot) + for iter1540 in self.new_parts: + iter1540.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -31149,11 +31149,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype1537, _size1534) = iprot.readListBegin() - for _i1538 in range(_size1534): - _elem1539 = PartitionSpec() - _elem1539.read(iprot) - self.new_parts.append(_elem1539) + (_etype1544, _size1541) = iprot.readListBegin() + for _i1545 in range(_size1541): + _elem1546 = PartitionSpec() + _elem1546.read(iprot) + self.new_parts.append(_elem1546) iprot.readListEnd() else: iprot.skip(ftype) @@ -31170,8 +31170,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1540 in self.new_parts: - iter1540.write(oprot) + for iter1547 in self.new_parts: + iter1547.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -31331,10 +31331,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1544, _size1541) = iprot.readListBegin() - for _i1545 in range(_size1541): - _elem1546 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.part_vals.append(_elem1546) + (_etype1551, _size1548) = iprot.readListBegin() + for _i1552 in range(_size1548): + _elem1553 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.part_vals.append(_elem1553) iprot.readListEnd() else: iprot.skip(ftype) @@ -31359,8 +31359,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1547 in 
self.part_vals: - oprot.writeString(iter1547.encode('utf-8') if sys.version_info[0] == 2 else iter1547) + for iter1554 in self.part_vals: + oprot.writeString(iter1554.encode('utf-8') if sys.version_info[0] == 2 else iter1554) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -31686,10 +31686,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1551, _size1548) = iprot.readListBegin() - for _i1552 in range(_size1548): - _elem1553 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.part_vals.append(_elem1553) + (_etype1558, _size1555) = iprot.readListBegin() + for _i1559 in range(_size1555): + _elem1560 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.part_vals.append(_elem1560) iprot.readListEnd() else: iprot.skip(ftype) @@ -31720,8 +31720,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1554 in self.part_vals: - oprot.writeString(iter1554.encode('utf-8') if sys.version_info[0] == 2 else iter1554) + for iter1561 in self.part_vals: + oprot.writeString(iter1561.encode('utf-8') if sys.version_info[0] == 2 else iter1561) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -32272,10 +32272,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1558, _size1555) = iprot.readListBegin() - for _i1559 in range(_size1555): - _elem1560 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.part_vals.append(_elem1560) + (_etype1565, _size1562) = iprot.readListBegin() + for _i1566 in range(_size1562): + _elem1567 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.part_vals.append(_elem1567) iprot.readListEnd() else: iprot.skip(ftype) @@ -32305,8 +32305,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1561 in self.part_vals: - oprot.writeString(iter1561.encode('utf-8') if sys.version_info[0] == 2 else iter1561) + for iter1568 in self.part_vals: + oprot.writeString(iter1568.encode('utf-8') if sys.version_info[0] == 2 else iter1568) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -32465,10 +32465,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1565, _size1562) = iprot.readListBegin() - for _i1566 in range(_size1562): - _elem1567 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.part_vals.append(_elem1567) + (_etype1572, _size1569) = iprot.readListBegin() + for _i1573 in range(_size1569): + _elem1574 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.part_vals.append(_elem1574) iprot.readListEnd() else: iprot.skip(ftype) @@ -32504,8 +32504,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1568 in self.part_vals: - oprot.writeString(iter1568.encode('utf-8') if sys.version_info[0] == 2 else iter1568) + for iter1575 in self.part_vals: + 
oprot.writeString(iter1575.encode('utf-8') if sys.version_info[0] == 2 else iter1575) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -33193,10 +33193,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1572, _size1569) = iprot.readListBegin() - for _i1573 in range(_size1569): - _elem1574 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.part_vals.append(_elem1574) + (_etype1579, _size1576) = iprot.readListBegin() + for _i1580 in range(_size1576): + _elem1581 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.part_vals.append(_elem1581) iprot.readListEnd() else: iprot.skip(ftype) @@ -33221,8 +33221,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1575 in self.part_vals: - oprot.writeString(iter1575.encode('utf-8') if sys.version_info[0] == 2 else iter1575) + for iter1582 in self.part_vals: + oprot.writeString(iter1582.encode('utf-8') if sys.version_info[0] == 2 else iter1582) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -33516,11 +33516,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1577, _vtype1578, _size1576) = iprot.readMapBegin() - for _i1580 in range(_size1576): - _key1581 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - _val1582 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.partitionSpecs[_key1581] = _val1582 + (_ktype1584, _vtype1585, _size1583) = iprot.readMapBegin() + for _i1587 in range(_size1583): + _key1588 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + _val1589 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.partitionSpecs[_key1588] = _val1589 iprot.readMapEnd() else: iprot.skip(ftype) @@ -33557,9 +33557,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1583, viter1584 in self.partitionSpecs.items(): - oprot.writeString(kiter1583.encode('utf-8') if sys.version_info[0] == 2 else kiter1583) - oprot.writeString(viter1584.encode('utf-8') if sys.version_info[0] == 2 else viter1584) + for kiter1590, viter1591 in self.partitionSpecs.items(): + oprot.writeString(kiter1590.encode('utf-8') if sys.version_info[0] == 2 else kiter1590) + oprot.writeString(viter1591.encode('utf-8') if sys.version_info[0] == 2 else viter1591) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -33746,11 +33746,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype1586, _vtype1587, _size1585) = iprot.readMapBegin() - for _i1589 in range(_size1585): - _key1590 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - _val1591 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.partitionSpecs[_key1590] = _val1591 + (_ktype1593, _vtype1594, _size1592) = iprot.readMapBegin() + for _i1596 in range(_size1592): + _key1597 = 
iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + _val1598 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.partitionSpecs[_key1597] = _val1598 iprot.readMapEnd() else: iprot.skip(ftype) @@ -33787,9 +33787,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter1592, viter1593 in self.partitionSpecs.items(): - oprot.writeString(kiter1592.encode('utf-8') if sys.version_info[0] == 2 else kiter1592) - oprot.writeString(viter1593.encode('utf-8') if sys.version_info[0] == 2 else viter1593) + for kiter1599, viter1600 in self.partitionSpecs.items(): + oprot.writeString(kiter1599.encode('utf-8') if sys.version_info[0] == 2 else kiter1599) + oprot.writeString(viter1600.encode('utf-8') if sys.version_info[0] == 2 else viter1600) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -33866,11 +33866,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1597, _size1594) = iprot.readListBegin() - for _i1598 in range(_size1594): - _elem1599 = Partition() - _elem1599.read(iprot) - self.success.append(_elem1599) + (_etype1604, _size1601) = iprot.readListBegin() + for _i1605 in range(_size1601): + _elem1606 = Partition() + _elem1606.read(iprot) + self.success.append(_elem1606) iprot.readListEnd() else: iprot.skip(ftype) @@ -33907,8 +33907,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1600 in self.success: - iter1600.write(oprot) + for iter1607 in self.success: + iter1607.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -33994,10 +33994,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1604, _size1601) = iprot.readListBegin() - for _i1605 in range(_size1601): - _elem1606 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.part_vals.append(_elem1606) + (_etype1611, _size1608) = iprot.readListBegin() + for _i1612 in range(_size1608): + _elem1613 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.part_vals.append(_elem1613) iprot.readListEnd() else: iprot.skip(ftype) @@ -34009,10 +34009,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1610, _size1607) = iprot.readListBegin() - for _i1611 in range(_size1607): - _elem1612 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.group_names.append(_elem1612) + (_etype1617, _size1614) = iprot.readListBegin() + for _i1618 in range(_size1614): + _elem1619 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.group_names.append(_elem1619) iprot.readListEnd() else: iprot.skip(ftype) @@ -34037,8 +34037,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1613 in self.part_vals: - oprot.writeString(iter1613.encode('utf-8') if sys.version_info[0] == 2 else iter1613) + for iter1620 in self.part_vals: + 
oprot.writeString(iter1620.encode('utf-8') if sys.version_info[0] == 2 else iter1620) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -34048,8 +34048,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1614 in self.group_names: - oprot.writeString(iter1614.encode('utf-8') if sys.version_info[0] == 2 else iter1614) + for iter1621 in self.group_names: + oprot.writeString(iter1621.encode('utf-8') if sys.version_info[0] == 2 else iter1621) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -34450,11 +34450,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1618, _size1615) = iprot.readListBegin() - for _i1619 in range(_size1615): - _elem1620 = Partition() - _elem1620.read(iprot) - self.success.append(_elem1620) + (_etype1625, _size1622) = iprot.readListBegin() + for _i1626 in range(_size1622): + _elem1627 = Partition() + _elem1627.read(iprot) + self.success.append(_elem1627) iprot.readListEnd() else: iprot.skip(ftype) @@ -34481,8 +34481,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1621 in self.success: - iter1621.write(oprot) + for iter1628 in self.success: + iter1628.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -34717,10 +34717,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype1625, _size1622) = iprot.readListBegin() - for _i1626 in range(_size1622): - _elem1627 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.group_names.append(_elem1627) + (_etype1632, _size1629) = iprot.readListBegin() + for _i1633 in range(_size1629): + _elem1634 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.group_names.append(_elem1634) iprot.readListEnd() else: iprot.skip(ftype) @@ -34753,8 +34753,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1628 in self.group_names: - oprot.writeString(iter1628.encode('utf-8') if sys.version_info[0] == 2 else iter1628) + for iter1635 in self.group_names: + oprot.writeString(iter1635.encode('utf-8') if sys.version_info[0] == 2 else iter1635) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -34811,11 +34811,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1632, _size1629) = iprot.readListBegin() - for _i1633 in range(_size1629): - _elem1634 = Partition() - _elem1634.read(iprot) - self.success.append(_elem1634) + (_etype1639, _size1636) = iprot.readListBegin() + for _i1640 in range(_size1636): + _elem1641 = Partition() + _elem1641.read(iprot) + self.success.append(_elem1641) iprot.readListEnd() else: iprot.skip(ftype) @@ -34842,8 +34842,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1635 in self.success: - iter1635.write(oprot) + for iter1642 in self.success: + iter1642.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -34991,11 +34991,11 @@ def read(self, iprot): if fid == 0: if ftype 
== TType.LIST: self.success = [] - (_etype1639, _size1636) = iprot.readListBegin() - for _i1640 in range(_size1636): - _elem1641 = PartitionSpec() - _elem1641.read(iprot) - self.success.append(_elem1641) + (_etype1646, _size1643) = iprot.readListBegin() + for _i1647 in range(_size1643): + _elem1648 = PartitionSpec() + _elem1648.read(iprot) + self.success.append(_elem1648) iprot.readListEnd() else: iprot.skip(ftype) @@ -35022,8 +35022,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1642 in self.success: - iter1642.write(oprot) + for iter1649 in self.success: + iter1649.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -35171,10 +35171,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1646, _size1643) = iprot.readListBegin() - for _i1647 in range(_size1643): - _elem1648 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1648) + (_etype1653, _size1650) = iprot.readListBegin() + for _i1654 in range(_size1650): + _elem1655 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1655) iprot.readListEnd() else: iprot.skip(ftype) @@ -35201,8 +35201,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1649 in self.success: - oprot.writeString(iter1649.encode('utf-8') if sys.version_info[0] == 2 else iter1649) + for iter1656 in self.success: + oprot.writeString(iter1656.encode('utf-8') if sys.version_info[0] == 2 else iter1656) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -35425,10 +35425,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1653, _size1650) = iprot.readListBegin() - for _i1654 in range(_size1650): - _elem1655 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.part_vals.append(_elem1655) + (_etype1660, _size1657) = iprot.readListBegin() + for _i1661 in range(_size1657): + _elem1662 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.part_vals.append(_elem1662) iprot.readListEnd() else: iprot.skip(ftype) @@ -35458,8 +35458,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1656 in self.part_vals: - oprot.writeString(iter1656.encode('utf-8') if sys.version_info[0] == 2 else iter1656) + for iter1663 in self.part_vals: + oprot.writeString(iter1663.encode('utf-8') if sys.version_info[0] == 2 else iter1663) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -35519,11 +35519,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1660, _size1657) = iprot.readListBegin() - for _i1661 in range(_size1657): - _elem1662 = Partition() - _elem1662.read(iprot) - self.success.append(_elem1662) + (_etype1667, _size1664) = iprot.readListBegin() + for _i1668 in range(_size1664): + _elem1669 = Partition() + _elem1669.read(iprot) + self.success.append(_elem1669) iprot.readListEnd() else: iprot.skip(ftype) @@ -35550,8 +35550,8 @@ def write(self, oprot): if 
self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1663 in self.success: - iter1663.write(oprot) + for iter1670 in self.success: + iter1670.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -35629,10 +35629,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1667, _size1664) = iprot.readListBegin() - for _i1668 in range(_size1664): - _elem1669 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.part_vals.append(_elem1669) + (_etype1674, _size1671) = iprot.readListBegin() + for _i1675 in range(_size1671): + _elem1676 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.part_vals.append(_elem1676) iprot.readListEnd() else: iprot.skip(ftype) @@ -35649,10 +35649,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype1673, _size1670) = iprot.readListBegin() - for _i1674 in range(_size1670): - _elem1675 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.group_names.append(_elem1675) + (_etype1680, _size1677) = iprot.readListBegin() + for _i1681 in range(_size1677): + _elem1682 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.group_names.append(_elem1682) iprot.readListEnd() else: iprot.skip(ftype) @@ -35677,8 +35677,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1676 in self.part_vals: - oprot.writeString(iter1676.encode('utf-8') if sys.version_info[0] == 2 else iter1676) + for iter1683 in self.part_vals: + oprot.writeString(iter1683.encode('utf-8') if sys.version_info[0] == 2 else iter1683) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -35692,8 +35692,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1677 in self.group_names: - oprot.writeString(iter1677.encode('utf-8') if sys.version_info[0] == 2 else iter1677) + for iter1684 in self.group_names: + oprot.writeString(iter1684.encode('utf-8') if sys.version_info[0] == 2 else iter1684) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -35751,11 +35751,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1681, _size1678) = iprot.readListBegin() - for _i1682 in range(_size1678): - _elem1683 = Partition() - _elem1683.read(iprot) - self.success.append(_elem1683) + (_etype1688, _size1685) = iprot.readListBegin() + for _i1689 in range(_size1685): + _elem1690 = Partition() + _elem1690.read(iprot) + self.success.append(_elem1690) iprot.readListEnd() else: iprot.skip(ftype) @@ -35782,8 +35782,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1684 in self.success: - iter1684.write(oprot) + for iter1691 in self.success: + iter1691.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -36006,10 +36006,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1688, _size1685) = 
iprot.readListBegin() - for _i1689 in range(_size1685): - _elem1690 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.part_vals.append(_elem1690) + (_etype1695, _size1692) = iprot.readListBegin() + for _i1696 in range(_size1692): + _elem1697 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.part_vals.append(_elem1697) iprot.readListEnd() else: iprot.skip(ftype) @@ -36039,8 +36039,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1691 in self.part_vals: - oprot.writeString(iter1691.encode('utf-8') if sys.version_info[0] == 2 else iter1691) + for iter1698 in self.part_vals: + oprot.writeString(iter1698.encode('utf-8') if sys.version_info[0] == 2 else iter1698) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -36100,10 +36100,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1695, _size1692) = iprot.readListBegin() - for _i1696 in range(_size1692): - _elem1697 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1697) + (_etype1702, _size1699) = iprot.readListBegin() + for _i1703 in range(_size1699): + _elem1704 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1704) iprot.readListEnd() else: iprot.skip(ftype) @@ -36130,8 +36130,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1698 in self.success: - oprot.writeString(iter1698.encode('utf-8') if sys.version_info[0] == 2 else iter1698) + for iter1705 in self.success: + oprot.writeString(iter1705.encode('utf-8') if sys.version_info[0] == 2 else iter1705) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -36405,10 +36405,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1702, _size1699) = iprot.readListBegin() - for _i1703 in range(_size1699): - _elem1704 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1704) + (_etype1709, _size1706) = iprot.readListBegin() + for _i1710 in range(_size1706): + _elem1711 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1711) iprot.readListEnd() else: iprot.skip(ftype) @@ -36435,8 +36435,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1705 in self.success: - oprot.writeString(iter1705.encode('utf-8') if sys.version_info[0] == 2 else iter1705) + for iter1712 in self.success: + oprot.writeString(iter1712.encode('utf-8') if sys.version_info[0] == 2 else iter1712) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -36596,11 +36596,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1709, _size1706) = iprot.readListBegin() - for _i1710 in range(_size1706): - _elem1711 = Partition() - _elem1711.read(iprot) - self.success.append(_elem1711) + (_etype1716, _size1713) = iprot.readListBegin() + for _i1717 in 
range(_size1713): + _elem1718 = Partition() + _elem1718.read(iprot) + self.success.append(_elem1718) iprot.readListEnd() else: iprot.skip(ftype) @@ -36627,8 +36627,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1712 in self.success: - iter1712.write(oprot) + for iter1719 in self.success: + iter1719.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -36753,11 +36753,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1716, _size1713) = iprot.readListBegin() - for _i1717 in range(_size1713): - _elem1718 = Partition() - _elem1718.read(iprot) - self.success.append(_elem1718) + (_etype1723, _size1720) = iprot.readListBegin() + for _i1724 in range(_size1720): + _elem1725 = Partition() + _elem1725.read(iprot) + self.success.append(_elem1725) iprot.readListEnd() else: iprot.skip(ftype) @@ -36784,8 +36784,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1719 in self.success: - iter1719.write(oprot) + for iter1726 in self.success: + iter1726.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -36945,11 +36945,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1723, _size1720) = iprot.readListBegin() - for _i1724 in range(_size1720): - _elem1725 = PartitionSpec() - _elem1725.read(iprot) - self.success.append(_elem1725) + (_etype1730, _size1727) = iprot.readListBegin() + for _i1731 in range(_size1727): + _elem1732 = PartitionSpec() + _elem1732.read(iprot) + self.success.append(_elem1732) iprot.readListEnd() else: iprot.skip(ftype) @@ -36976,8 +36976,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1726 in self.success: - iter1726.write(oprot) + for iter1733 in self.success: + iter1733.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -37518,10 +37518,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype1730, _size1727) = iprot.readListBegin() - for _i1731 in range(_size1727): - _elem1732 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.names.append(_elem1732) + (_etype1737, _size1734) = iprot.readListBegin() + for _i1738 in range(_size1734): + _elem1739 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.names.append(_elem1739) iprot.readListEnd() else: iprot.skip(ftype) @@ -37546,8 +37546,8 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter1733 in self.names: - oprot.writeString(iter1733.encode('utf-8') if sys.version_info[0] == 2 else iter1733) + for iter1740 in self.names: + oprot.writeString(iter1740.encode('utf-8') if sys.version_info[0] == 2 else iter1740) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -37604,11 +37604,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1737, _size1734) = iprot.readListBegin() - for _i1738 in range(_size1734): - _elem1739 = Partition() - _elem1739.read(iprot) - self.success.append(_elem1739) + 
(_etype1744, _size1741) = iprot.readListBegin() + for _i1745 in range(_size1741): + _elem1746 = Partition() + _elem1746.read(iprot) + self.success.append(_elem1746) iprot.readListEnd() else: iprot.skip(ftype) @@ -37640,8 +37640,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1740 in self.success: - iter1740.write(oprot) + for iter1747 in self.success: + iter1747.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -38337,11 +38337,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1744, _size1741) = iprot.readListBegin() - for _i1745 in range(_size1741): - _elem1746 = Partition() - _elem1746.read(iprot) - self.new_parts.append(_elem1746) + (_etype1751, _size1748) = iprot.readListBegin() + for _i1752 in range(_size1748): + _elem1753 = Partition() + _elem1753.read(iprot) + self.new_parts.append(_elem1753) iprot.readListEnd() else: iprot.skip(ftype) @@ -38366,8 +38366,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1747 in self.new_parts: - iter1747.write(oprot) + for iter1754 in self.new_parts: + iter1754.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -38508,11 +38508,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype1751, _size1748) = iprot.readListBegin() - for _i1752 in range(_size1748): - _elem1753 = Partition() - _elem1753.read(iprot) - self.new_parts.append(_elem1753) + (_etype1758, _size1755) = iprot.readListBegin() + for _i1759 in range(_size1755): + _elem1760 = Partition() + _elem1760.read(iprot) + self.new_parts.append(_elem1760) iprot.readListEnd() else: iprot.skip(ftype) @@ -38543,8 +38543,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter1754 in self.new_parts: - iter1754.write(oprot) + for iter1761 in self.new_parts: + iter1761.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -39013,10 +39013,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype1758, _size1755) = iprot.readListBegin() - for _i1759 in range(_size1755): - _elem1760 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.part_vals.append(_elem1760) + (_etype1765, _size1762) = iprot.readListBegin() + for _i1766 in range(_size1762): + _elem1767 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.part_vals.append(_elem1767) iprot.readListEnd() else: iprot.skip(ftype) @@ -39047,8 +39047,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1761 in self.part_vals: - oprot.writeString(iter1761.encode('utf-8') if sys.version_info[0] == 2 else iter1761) + for iter1768 in self.part_vals: + oprot.writeString(iter1768.encode('utf-8') if sys.version_info[0] == 2 else iter1768) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -39329,10 +39329,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype1765, 
_size1762) = iprot.readListBegin() - for _i1766 in range(_size1762): - _elem1767 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.part_vals.append(_elem1767) + (_etype1772, _size1769) = iprot.readListBegin() + for _i1773 in range(_size1769): + _elem1774 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.part_vals.append(_elem1774) iprot.readListEnd() else: iprot.skip(ftype) @@ -39354,8 +39354,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter1768 in self.part_vals: - oprot.writeString(iter1768.encode('utf-8') if sys.version_info[0] == 2 else iter1768) + for iter1775 in self.part_vals: + oprot.writeString(iter1775.encode('utf-8') if sys.version_info[0] == 2 else iter1775) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -39693,10 +39693,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1772, _size1769) = iprot.readListBegin() - for _i1773 in range(_size1769): - _elem1774 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1774) + (_etype1779, _size1776) = iprot.readListBegin() + for _i1780 in range(_size1776): + _elem1781 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1781) iprot.readListEnd() else: iprot.skip(ftype) @@ -39718,8 +39718,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1775 in self.success: - oprot.writeString(iter1775.encode('utf-8') if sys.version_info[0] == 2 else iter1775) + for iter1782 in self.success: + oprot.writeString(iter1782.encode('utf-8') if sys.version_info[0] == 2 else iter1782) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -39836,11 +39836,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype1777, _vtype1778, _size1776) = iprot.readMapBegin() - for _i1780 in range(_size1776): - _key1781 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - _val1782 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success[_key1781] = _val1782 + (_ktype1784, _vtype1785, _size1783) = iprot.readMapBegin() + for _i1787 in range(_size1783): + _key1788 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + _val1789 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success[_key1788] = _val1789 iprot.readMapEnd() else: iprot.skip(ftype) @@ -39862,9 +39862,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter1783, viter1784 in self.success.items(): - oprot.writeString(kiter1783.encode('utf-8') if sys.version_info[0] == 2 else kiter1783) - oprot.writeString(viter1784.encode('utf-8') if sys.version_info[0] == 2 else viter1784) + for kiter1790, viter1791 in self.success.items(): + oprot.writeString(kiter1790.encode('utf-8') if sys.version_info[0] == 2 else 
kiter1790) + oprot.writeString(viter1791.encode('utf-8') if sys.version_info[0] == 2 else viter1791) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -39933,11 +39933,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1786, _vtype1787, _size1785) = iprot.readMapBegin() - for _i1789 in range(_size1785): - _key1790 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - _val1791 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.part_vals[_key1790] = _val1791 + (_ktype1793, _vtype1794, _size1792) = iprot.readMapBegin() + for _i1796 in range(_size1792): + _key1797 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + _val1798 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.part_vals[_key1797] = _val1798 iprot.readMapEnd() else: iprot.skip(ftype) @@ -39967,9 +39967,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1792, viter1793 in self.part_vals.items(): - oprot.writeString(kiter1792.encode('utf-8') if sys.version_info[0] == 2 else kiter1792) - oprot.writeString(viter1793.encode('utf-8') if sys.version_info[0] == 2 else viter1793) + for kiter1799, viter1800 in self.part_vals.items(): + oprot.writeString(kiter1799.encode('utf-8') if sys.version_info[0] == 2 else kiter1799) + oprot.writeString(viter1800.encode('utf-8') if sys.version_info[0] == 2 else viter1800) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -40163,11 +40163,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype1795, _vtype1796, _size1794) = iprot.readMapBegin() - for _i1798 in range(_size1794): - _key1799 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - _val1800 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.part_vals[_key1799] = _val1800 + (_ktype1802, _vtype1803, _size1801) = iprot.readMapBegin() + for _i1805 in range(_size1801): + _key1806 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + _val1807 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.part_vals[_key1806] = _val1807 iprot.readMapEnd() else: iprot.skip(ftype) @@ -40197,9 +40197,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter1801, viter1802 in self.part_vals.items(): - oprot.writeString(kiter1801.encode('utf-8') if sys.version_info[0] == 2 else kiter1801) - oprot.writeString(viter1802.encode('utf-8') if sys.version_info[0] == 2 else viter1802) + for kiter1808, viter1809 in self.part_vals.items(): + oprot.writeString(kiter1808.encode('utf-8') if sys.version_info[0] == 2 else kiter1808) + oprot.writeString(viter1809.encode('utf-8') if sys.version_info[0] == 2 else viter1809) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -44241,10 +44241,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1806, _size1803) = 
iprot.readListBegin() - for _i1807 in range(_size1803): - _elem1808 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1808) + (_etype1813, _size1810) = iprot.readListBegin() + for _i1814 in range(_size1810): + _elem1815 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1815) iprot.readListEnd() else: iprot.skip(ftype) @@ -44266,8 +44266,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1809 in self.success: - oprot.writeString(iter1809.encode('utf-8') if sys.version_info[0] == 2 else iter1809) + for iter1816 in self.success: + oprot.writeString(iter1816.encode('utf-8') if sys.version_info[0] == 2 else iter1816) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -44913,10 +44913,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1813, _size1810) = iprot.readListBegin() - for _i1814 in range(_size1810): - _elem1815 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1815) + (_etype1820, _size1817) = iprot.readListBegin() + for _i1821 in range(_size1817): + _elem1822 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1822) iprot.readListEnd() else: iprot.skip(ftype) @@ -44938,8 +44938,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1816 in self.success: - oprot.writeString(iter1816.encode('utf-8') if sys.version_info[0] == 2 else iter1816) + for iter1823 in self.success: + oprot.writeString(iter1823.encode('utf-8') if sys.version_info[0] == 2 else iter1823) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -45422,11 +45422,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1820, _size1817) = iprot.readListBegin() - for _i1821 in range(_size1817): - _elem1822 = Role() - _elem1822.read(iprot) - self.success.append(_elem1822) + (_etype1827, _size1824) = iprot.readListBegin() + for _i1828 in range(_size1824): + _elem1829 = Role() + _elem1829.read(iprot) + self.success.append(_elem1829) iprot.readListEnd() else: iprot.skip(ftype) @@ -45448,8 +45448,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1823 in self.success: - iter1823.write(oprot) + for iter1830 in self.success: + iter1830.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -45928,10 +45928,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype1827, _size1824) = iprot.readListBegin() - for _i1828 in range(_size1824): - _elem1829 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.group_names.append(_elem1829) + (_etype1834, _size1831) = iprot.readListBegin() + for _i1835 in range(_size1831): + _elem1836 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.group_names.append(_elem1836) iprot.readListEnd() else: iprot.skip(ftype) @@ 
-45956,8 +45956,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1830 in self.group_names: - oprot.writeString(iter1830.encode('utf-8') if sys.version_info[0] == 2 else iter1830) + for iter1837 in self.group_names: + oprot.writeString(iter1837.encode('utf-8') if sys.version_info[0] == 2 else iter1837) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -46171,11 +46171,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1834, _size1831) = iprot.readListBegin() - for _i1835 in range(_size1831): - _elem1836 = HiveObjectPrivilege() - _elem1836.read(iprot) - self.success.append(_elem1836) + (_etype1841, _size1838) = iprot.readListBegin() + for _i1842 in range(_size1838): + _elem1843 = HiveObjectPrivilege() + _elem1843.read(iprot) + self.success.append(_elem1843) iprot.readListEnd() else: iprot.skip(ftype) @@ -46197,8 +46197,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1837 in self.success: - iter1837.write(oprot) + for iter1844 in self.success: + iter1844.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -46829,10 +46829,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype1841, _size1838) = iprot.readListBegin() - for _i1842 in range(_size1838): - _elem1843 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.group_names.append(_elem1843) + (_etype1848, _size1845) = iprot.readListBegin() + for _i1849 in range(_size1845): + _elem1850 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.group_names.append(_elem1850) iprot.readListEnd() else: iprot.skip(ftype) @@ -46853,8 +46853,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter1844 in self.group_names: - oprot.writeString(iter1844.encode('utf-8') if sys.version_info[0] == 2 else iter1844) + for iter1851 in self.group_names: + oprot.writeString(iter1851.encode('utf-8') if sys.version_info[0] == 2 else iter1851) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -46906,10 +46906,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1848, _size1845) = iprot.readListBegin() - for _i1849 in range(_size1845): - _elem1850 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1850) + (_etype1855, _size1852) = iprot.readListBegin() + for _i1856 in range(_size1852): + _elem1857 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1857) iprot.readListEnd() else: iprot.skip(ftype) @@ -46931,8 +46931,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1851 in self.success: - oprot.writeString(iter1851.encode('utf-8') if sys.version_info[0] == 2 else iter1851) + for iter1858 in self.success: + oprot.writeString(iter1858.encode('utf-8') if sys.version_info[0] == 2 else iter1858) 
oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -47815,10 +47815,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1855, _size1852) = iprot.readListBegin() - for _i1856 in range(_size1852): - _elem1857 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1857) + (_etype1862, _size1859) = iprot.readListBegin() + for _i1863 in range(_size1859): + _elem1864 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1864) iprot.readListEnd() else: iprot.skip(ftype) @@ -47835,8 +47835,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1858 in self.success: - oprot.writeString(iter1858.encode('utf-8') if sys.version_info[0] == 2 else iter1858) + for iter1865 in self.success: + oprot.writeString(iter1865.encode('utf-8') if sys.version_info[0] == 2 else iter1865) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -48333,10 +48333,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1862, _size1859) = iprot.readListBegin() - for _i1863 in range(_size1859): - _elem1864 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1864) + (_etype1869, _size1866) = iprot.readListBegin() + for _i1870 in range(_size1866): + _elem1871 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1871) iprot.readListEnd() else: iprot.skip(ftype) @@ -48353,8 +48353,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1865 in self.success: - oprot.writeString(iter1865.encode('utf-8') if sys.version_info[0] == 2 else iter1865) + for iter1872 in self.success: + oprot.writeString(iter1872.encode('utf-8') if sys.version_info[0] == 2 else iter1872) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -49521,11 +49521,11 @@ def read(self, iprot): elif fid == 2: if ftype == TType.MAP: self.writeIds = {} - (_ktype1867, _vtype1868, _size1866) = iprot.readMapBegin() - for _i1870 in range(_size1866): - _key1871 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - _val1872 = iprot.readI64() - self.writeIds[_key1871] = _val1872 + (_ktype1874, _vtype1875, _size1873) = iprot.readMapBegin() + for _i1877 in range(_size1873): + _key1878 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + _val1879 = iprot.readI64() + self.writeIds[_key1878] = _val1879 iprot.readMapEnd() else: iprot.skip(ftype) @@ -49546,9 +49546,9 @@ def write(self, oprot): if self.writeIds is not None: oprot.writeFieldBegin('writeIds', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.I64, len(self.writeIds)) - for kiter1873, viter1874 in self.writeIds.items(): - oprot.writeString(kiter1873.encode('utf-8') if sys.version_info[0] == 2 else kiter1873) - oprot.writeI64(viter1874) + for kiter1880, viter1881 in self.writeIds.items(): + oprot.writeString(kiter1880.encode('utf-8') if sys.version_info[0] == 2 else kiter1880) + oprot.writeI64(viter1881) oprot.writeMapEnd() 
oprot.writeFieldEnd() oprot.writeFieldStop() @@ -52161,10 +52161,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1878, _size1875) = iprot.readListBegin() - for _i1879 in range(_size1875): - _elem1880 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1880) + (_etype1885, _size1882) = iprot.readListBegin() + for _i1886 in range(_size1882): + _elem1887 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1887) iprot.readListEnd() else: iprot.skip(ftype) @@ -52181,8 +52181,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1881 in self.success: - oprot.writeString(iter1881.encode('utf-8') if sys.version_info[0] == 2 else iter1881) + for iter1888 in self.success: + oprot.writeString(iter1888.encode('utf-8') if sys.version_info[0] == 2 else iter1888) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -58738,11 +58738,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1885, _size1882) = iprot.readListBegin() - for _i1886 in range(_size1882): - _elem1887 = SchemaVersion() - _elem1887.read(iprot) - self.success.append(_elem1887) + (_etype1892, _size1889) = iprot.readListBegin() + for _i1893 in range(_size1889): + _elem1894 = SchemaVersion() + _elem1894.read(iprot) + self.success.append(_elem1894) iprot.readListEnd() else: iprot.skip(ftype) @@ -58769,8 +58769,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1888 in self.success: - iter1888.write(oprot) + for iter1895 in self.success: + iter1895.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -60159,11 +60159,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1892, _size1889) = iprot.readListBegin() - for _i1893 in range(_size1889): - _elem1894 = RuntimeStat() - _elem1894.read(iprot) - self.success.append(_elem1894) + (_etype1899, _size1896) = iprot.readListBegin() + for _i1900 in range(_size1896): + _elem1901 = RuntimeStat() + _elem1901.read(iprot) + self.success.append(_elem1901) iprot.readListEnd() else: iprot.skip(ftype) @@ -60185,8 +60185,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1895 in self.success: - iter1895.write(oprot) + for iter1902 in self.success: + iter1902.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -61823,10 +61823,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1899, _size1896) = iprot.readListBegin() - for _i1900 in range(_size1896): - _elem1901 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1901) + (_etype1906, _size1903) = iprot.readListBegin() + for _i1907 in range(_size1903): + _elem1908 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1908) iprot.readListEnd() else: iprot.skip(ftype) @@ -61848,8 +61848,8 @@ def write(self, oprot): if self.success is not None: 
oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1902 in self.success: - oprot.writeString(iter1902.encode('utf-8') if sys.version_info[0] == 2 else iter1902) + for iter1909 in self.success: + oprot.writeString(iter1909.encode('utf-8') if sys.version_info[0] == 2 else iter1909) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -62241,10 +62241,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1906, _size1903) = iprot.readListBegin() - for _i1907 in range(_size1903): - _elem1908 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() - self.success.append(_elem1908) + (_etype1913, _size1910) = iprot.readListBegin() + for _i1914 in range(_size1910): + _elem1915 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.success.append(_elem1915) iprot.readListEnd() else: iprot.skip(ftype) @@ -62266,8 +62266,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter1909 in self.success: - oprot.writeString(iter1909.encode('utf-8') if sys.version_info[0] == 2 else iter1909) + for iter1916 in self.success: + oprot.writeString(iter1916.encode('utf-8') if sys.version_info[0] == 2 else iter1916) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -62510,11 +62510,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype1913, _size1910) = iprot.readListBegin() - for _i1914 in range(_size1910): - _elem1915 = WriteEventInfo() - _elem1915.read(iprot) - self.success.append(_elem1915) + (_etype1920, _size1917) = iprot.readListBegin() + for _i1921 in range(_size1917): + _elem1922 = WriteEventInfo() + _elem1922.read(iprot) + self.success.append(_elem1922) iprot.readListEnd() else: iprot.skip(ftype) @@ -62536,8 +62536,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter1916 in self.success: - iter1916.write(oprot) + for iter1923 in self.success: + iter1923.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 7b8052771aad..7f3c0e949ffc 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -6478,7 +6478,7 @@ class ColumnStatistics(object): """ - def __init__(self, statsDesc=None, statsObj=None, isStatsCompliant=None, engine=None,): + def __init__(self, statsDesc=None, statsObj=None, isStatsCompliant=None, engine="hive",): self.statsDesc = statsDesc self.statsObj = statsObj self.isStatsCompliant = isStatsCompliant @@ -7955,7 +7955,7 @@ class SetPartitionsStatsRequest(object): """ - def __init__(self, colStats=None, needMerge=None, writeId=-1, validWriteIdList=None, engine=None,): + def __init__(self, colStats=None, needMerge=None, writeId=-1, validWriteIdList=None, engine="hive",): self.colStats = colStats self.needMerge = needMerge self.writeId = writeId @@ -8041,8 +8041,6 @@ def write(self, oprot): def validate(self): if self.colStats is None: raise 
TProtocolException(message='Required field colStats is unset!') - if self.engine is None: - raise TProtocolException(message='Required field engine is unset!') return def __repr__(self): @@ -10466,7 +10464,7 @@ class TableStatsRequest(object): """ - def __init__(self, dbName=None, tblName=None, colNames=None, catName=None, validWriteIdList=None, engine=None, id=-1,): + def __init__(self, dbName=None, tblName=None, colNames=None, catName=None, validWriteIdList=None, engine="hive", id=-1,): self.dbName = dbName self.tblName = tblName self.colNames = colNames @@ -10575,8 +10573,6 @@ def validate(self): raise TProtocolException(message='Required field tblName is unset!') if self.colNames is None: raise TProtocolException(message='Required field colNames is unset!') - if self.engine is None: - raise TProtocolException(message='Required field engine is unset!') return def __repr__(self): @@ -10605,7 +10601,7 @@ class PartitionsStatsRequest(object): """ - def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None, catName=None, validWriteIdList=None, engine=None,): + def __init__(self, dbName=None, tblName=None, colNames=None, partNames=None, catName=None, validWriteIdList=None, engine="hive",): self.dbName = dbName self.tblName = tblName self.colNames = colNames @@ -10724,8 +10720,6 @@ def validate(self): raise TProtocolException(message='Required field colNames is unset!') if self.partNames is None: raise TProtocolException(message='Required field partNames is unset!') - if self.engine is None: - raise TProtocolException(message='Required field engine is unset!') return def __repr__(self): @@ -11728,7 +11722,7 @@ class GetPartitionsByNamesRequest(object): """ - def __init__(self, db_name=None, tbl_name=None, names=None, get_col_stats=None, processorCapabilities=None, processorIdentifier=None, engine=None, validWriteIdList=None, getFileMetadata=None, id=-1, skipColumnSchemaForPartition=None, includeParamKeyPattern=None, excludeParamKeyPattern=None,): + def __init__(self, db_name=None, tbl_name=None, names=None, get_col_stats=None, processorCapabilities=None, processorIdentifier=None, engine="hive", validWriteIdList=None, getFileMetadata=None, id=-1, skipColumnSchemaForPartition=None, includeParamKeyPattern=None, excludeParamKeyPattern=None,): self.db_name = db_name self.tbl_name = tbl_name self.names = names @@ -20259,7 +20253,7 @@ class GetTableRequest(object): """ - def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None, validWriteIdList=None, getColumnStats=None, processorCapabilities=None, processorIdentifier=None, engine=None, id=-1,): + def __init__(self, dbName=None, tblName=None, capabilities=None, catName=None, validWriteIdList=None, getColumnStats=None, processorCapabilities=None, processorIdentifier=None, engine="hive", id=-1,): self.dbName = dbName self.tblName = tblName self.capabilities = capabilities @@ -28595,11 +28589,12 @@ class GetPartitionsPsWithAuthRequest(object): - skipColumnSchemaForPartition - includeParamKeyPattern - excludeParamKeyPattern + - partNames """ - def __init__(self, catName=None, dbName=None, tblName=None, partVals=None, maxParts=-1, userName=None, groupNames=None, validWriteIdList=None, id=-1, skipColumnSchemaForPartition=None, includeParamKeyPattern=None, excludeParamKeyPattern=None,): + def __init__(self, catName=None, dbName=None, tblName=None, partVals=None, maxParts=-1, userName=None, groupNames=None, validWriteIdList=None, id=-1, skipColumnSchemaForPartition=None, includeParamKeyPattern=None, 
excludeParamKeyPattern=None, partNames=None,): self.catName = catName self.dbName = dbName self.tblName = tblName @@ -28612,6 +28607,7 @@ def __init__(self, catName=None, dbName=None, tblName=None, partVals=None, maxPa self.skipColumnSchemaForPartition = skipColumnSchemaForPartition self.includeParamKeyPattern = includeParamKeyPattern self.excludeParamKeyPattern = excludeParamKeyPattern + self.partNames = partNames def read(self, iprot): if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None: @@ -28692,6 +28688,16 @@ def read(self, iprot): self.excludeParamKeyPattern = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() else: iprot.skip(ftype) + elif fid == 13: + if ftype == TType.LIST: + self.partNames = [] + (_etype1323, _size1320) = iprot.readListBegin() + for _i1324 in range(_size1320): + _elem1325 = iprot.readString().decode('utf-8', errors='replace') if sys.version_info[0] == 2 else iprot.readString() + self.partNames.append(_elem1325) + iprot.readListEnd() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -28717,8 +28723,8 @@ def write(self, oprot): if self.partVals is not None: oprot.writeFieldBegin('partVals', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.partVals)) - for iter1320 in self.partVals: - oprot.writeString(iter1320.encode('utf-8') if sys.version_info[0] == 2 else iter1320) + for iter1326 in self.partVals: + oprot.writeString(iter1326.encode('utf-8') if sys.version_info[0] == 2 else iter1326) oprot.writeListEnd() oprot.writeFieldEnd() if self.maxParts is not None: @@ -28732,8 +28738,8 @@ def write(self, oprot): if self.groupNames is not None: oprot.writeFieldBegin('groupNames', TType.LIST, 7) oprot.writeListBegin(TType.STRING, len(self.groupNames)) - for iter1321 in self.groupNames: - oprot.writeString(iter1321.encode('utf-8') if sys.version_info[0] == 2 else iter1321) + for iter1327 in self.groupNames: + oprot.writeString(iter1327.encode('utf-8') if sys.version_info[0] == 2 else iter1327) oprot.writeListEnd() oprot.writeFieldEnd() if self.validWriteIdList is not None: @@ -28756,6 +28762,13 @@ def write(self, oprot): oprot.writeFieldBegin('excludeParamKeyPattern', TType.STRING, 12) oprot.writeString(self.excludeParamKeyPattern.encode('utf-8') if sys.version_info[0] == 2 else self.excludeParamKeyPattern) oprot.writeFieldEnd() + if self.partNames is not None: + oprot.writeFieldBegin('partNames', TType.LIST, 13) + oprot.writeListBegin(TType.STRING, len(self.partNames)) + for iter1328 in self.partNames: + oprot.writeString(iter1328.encode('utf-8') if sys.version_info[0] == 2 else iter1328) + oprot.writeListEnd() + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -28801,11 +28814,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype1325, _size1322) = iprot.readListBegin() - for _i1326 in range(_size1322): - _elem1327 = Partition() - _elem1327.read(iprot) - self.partitions.append(_elem1327) + (_etype1332, _size1329) = iprot.readListBegin() + for _i1333 in range(_size1329): + _elem1334 = Partition() + _elem1334.read(iprot) + self.partitions.append(_elem1334) iprot.readListEnd() else: iprot.skip(ftype) @@ -28822,8 +28835,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter1328 in self.partitions: - iter1328.write(oprot) + for 
iter1335 in self.partitions: + iter1335.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -28987,11 +29000,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.replicationMetricList = [] - (_etype1332, _size1329) = iprot.readListBegin() - for _i1333 in range(_size1329): - _elem1334 = ReplicationMetrics() - _elem1334.read(iprot) - self.replicationMetricList.append(_elem1334) + (_etype1339, _size1336) = iprot.readListBegin() + for _i1340 in range(_size1336): + _elem1341 = ReplicationMetrics() + _elem1341.read(iprot) + self.replicationMetricList.append(_elem1341) iprot.readListEnd() else: iprot.skip(ftype) @@ -29008,8 +29021,8 @@ def write(self, oprot): if self.replicationMetricList is not None: oprot.writeFieldBegin('replicationMetricList', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.replicationMetricList)) - for iter1335 in self.replicationMetricList: - iter1335.write(oprot) + for iter1342 in self.replicationMetricList: + iter1342.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -29134,10 +29147,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.excludeTxnTypes = [] - (_etype1339, _size1336) = iprot.readListBegin() - for _i1340 in range(_size1336): - _elem1341 = iprot.readI32() - self.excludeTxnTypes.append(_elem1341) + (_etype1346, _size1343) = iprot.readListBegin() + for _i1347 in range(_size1343): + _elem1348 = iprot.readI32() + self.excludeTxnTypes.append(_elem1348) iprot.readListEnd() else: iprot.skip(ftype) @@ -29154,8 +29167,8 @@ def write(self, oprot): if self.excludeTxnTypes is not None: oprot.writeFieldBegin('excludeTxnTypes', TType.LIST, 1) oprot.writeListBegin(TType.I32, len(self.excludeTxnTypes)) - for iter1342 in self.excludeTxnTypes: - oprot.writeI32(iter1342) + for iter1349 in self.excludeTxnTypes: + oprot.writeI32(iter1349) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -31708,7 +31721,7 @@ def __ne__(self, other): (1, TType.STRUCT, 'statsDesc', [ColumnStatisticsDesc, None], None, ), # 1 (2, TType.LIST, 'statsObj', (TType.STRUCT, [ColumnStatisticsObj, None], False), None, ), # 2 (3, TType.BOOL, 'isStatsCompliant', None, None, ), # 3 - (4, TType.STRING, 'engine', 'UTF8', None, ), # 4 + (4, TType.STRING, 'engine', 'UTF8', "hive", ), # 4 ) all_structs.append(FileMetadata) FileMetadata.thrift_spec = ( @@ -31826,7 +31839,7 @@ def __ne__(self, other): (2, TType.BOOL, 'needMerge', None, None, ), # 2 (3, TType.I64, 'writeId', None, -1, ), # 3 (4, TType.STRING, 'validWriteIdList', 'UTF8', None, ), # 4 - (5, TType.STRING, 'engine', 'UTF8', None, ), # 5 + (5, TType.STRING, 'engine', 'UTF8', "hive", ), # 5 ) all_structs.append(SetPartitionsStatsResponse) SetPartitionsStatsResponse.thrift_spec = ( @@ -32025,7 +32038,7 @@ def __ne__(self, other): (3, TType.LIST, 'colNames', (TType.STRING, 'UTF8', False), None, ), # 3 (4, TType.STRING, 'catName', 'UTF8', None, ), # 4 (5, TType.STRING, 'validWriteIdList', 'UTF8', None, ), # 5 - (6, TType.STRING, 'engine', 'UTF8', None, ), # 6 + (6, TType.STRING, 'engine', 'UTF8', "hive", ), # 6 (7, TType.I64, 'id', None, -1, ), # 7 ) all_structs.append(PartitionsStatsRequest) @@ -32037,7 +32050,7 @@ def __ne__(self, other): (4, TType.LIST, 'partNames', (TType.STRING, 'UTF8', False), None, ), # 4 (5, TType.STRING, 'catName', 'UTF8', None, ), # 5 (6, TType.STRING, 'validWriteIdList', 'UTF8', None, ), # 6 - (7, TType.STRING, 'engine', 'UTF8', None, ), # 7 + (7, TType.STRING, 'engine', 'UTF8', "hive", ), # 7 ) 
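Together with the Ruby, Thrift IDL, and Java client hunks later in this diff, the thrift_spec changes above switch the engine field of the stats request structs from a required field to an optional one defaulting to "hive". As a rough caller-side illustration only (not part of the patch), assuming the usual Thrift-generated accessors and the constructor shape used in the HiveMetaStoreClient hunk below:

    // Minimal sketch: engine is no longer a constructor argument, and an unset
    // engine should read back as the IDL default "hive".
    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.TableStatsRequest;

    public class EngineDefaultSketch {
      public static void main(String[] args) {
        TableStatsRequest rqst = new TableStatsRequest("db", "tbl", Arrays.asList("c1", "c2"));
        System.out.println(rqst.getEngine());   // expected: hive (the IDL default)
        rqst.setEngine("impala");               // non-default engines are still set explicitly
      }
    }

The apparent intent is that requests which never set engine no longer fail required-field validation; such callers are presumably just treated as the "hive" engine on the server side.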
all_structs.append(AddPartitionsResult) AddPartitionsResult.thrift_spec = ( @@ -32123,7 +32136,7 @@ def __ne__(self, other): (4, TType.BOOL, 'get_col_stats', None, None, ), # 4 (5, TType.LIST, 'processorCapabilities', (TType.STRING, 'UTF8', False), None, ), # 5 (6, TType.STRING, 'processorIdentifier', 'UTF8', None, ), # 6 - (7, TType.STRING, 'engine', 'UTF8', None, ), # 7 + (7, TType.STRING, 'engine', 'UTF8', "hive", ), # 7 (8, TType.STRING, 'validWriteIdList', 'UTF8', None, ), # 8 (9, TType.BOOL, 'getFileMetadata', None, None, ), # 9 (10, TType.I64, 'id', None, -1, ), # 10 @@ -32815,7 +32828,7 @@ def __ne__(self, other): (7, TType.BOOL, 'getColumnStats', None, None, ), # 7 (8, TType.LIST, 'processorCapabilities', (TType.STRING, 'UTF8', False), None, ), # 8 (9, TType.STRING, 'processorIdentifier', 'UTF8', None, ), # 9 - (10, TType.STRING, 'engine', 'UTF8', None, ), # 10 + (10, TType.STRING, 'engine', 'UTF8', "hive", ), # 10 (11, TType.I64, 'id', None, -1, ), # 11 ) all_structs.append(GetTableResult) @@ -33513,6 +33526,7 @@ def __ne__(self, other): (10, TType.BOOL, 'skipColumnSchemaForPartition', None, None, ), # 10 (11, TType.STRING, 'includeParamKeyPattern', 'UTF8', None, ), # 11 (12, TType.STRING, 'excludeParamKeyPattern', 'UTF8', None, ), # 12 + (13, TType.LIST, 'partNames', (TType.STRING, 'UTF8', False), None, ), # 13 ) all_structs.append(GetPartitionsPsWithAuthResponse) GetPartitionsPsWithAuthResponse.thrift_spec = ( diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb index ab4c608d8e8e..b5fcabee2504 100644 --- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -2342,7 +2342,7 @@ class ColumnStatistics STATSDESC => {:type => ::Thrift::Types::STRUCT, :name => 'statsDesc', :class => ::ColumnStatisticsDesc}, STATSOBJ => {:type => ::Thrift::Types::LIST, :name => 'statsObj', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}, ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true}, - ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :optional => true} + ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end @@ -2656,14 +2656,13 @@ class SetPartitionsStatsRequest NEEDMERGE => {:type => ::Thrift::Types::BOOL, :name => 'needMerge', :optional => true}, WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true}, VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}, - ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine'} + ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end def validate raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colStats is unset!') unless @colStats - raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field engine is unset!') unless @engine end ::Thrift::Struct.generate_accessors self @@ -3274,7 +3273,7 @@ class TableStatsRequest COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}}, CATNAME => {:type => ::Thrift::Types::STRING, 
:name => 'catName', :optional => true}, VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}, - ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine'}, + ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :default => %q"hive", :optional => true}, ID => {:type => ::Thrift::Types::I64, :name => 'id', :default => -1, :optional => true} } @@ -3284,7 +3283,6 @@ def validate raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colNames is unset!') unless @colNames - raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field engine is unset!') unless @engine end ::Thrift::Struct.generate_accessors self @@ -3307,7 +3305,7 @@ class PartitionsStatsRequest PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}}, CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}, - ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine'} + ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :default => %q"hive", :optional => true} } def struct_fields; FIELDS; end @@ -3317,7 +3315,6 @@ def validate raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colNames is unset!') unless @colNames raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partNames is unset!') unless @partNames - raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field engine is unset!') unless @engine end ::Thrift::Struct.generate_accessors self @@ -3574,7 +3571,7 @@ class GetPartitionsByNamesRequest GET_COL_STATS => {:type => ::Thrift::Types::BOOL, :name => 'get_col_stats', :optional => true}, PROCESSORCAPABILITIES => {:type => ::Thrift::Types::LIST, :name => 'processorCapabilities', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, PROCESSORIDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'processorIdentifier', :optional => true}, - ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :optional => true}, + ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :default => %q"hive", :optional => true}, VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}, GETFILEMETADATA => {:type => ::Thrift::Types::BOOL, :name => 'getFileMetadata', :optional => true}, ID => {:type => ::Thrift::Types::I64, :name => 'id', :default => -1, :optional => true}, @@ -5705,7 +5702,7 @@ class GetTableRequest GETCOLUMNSTATS => {:type => ::Thrift::Types::BOOL, :name => 'getColumnStats', :optional => true}, PROCESSORCAPABILITIES => {:type => ::Thrift::Types::LIST, :name => 'processorCapabilities', :element => {:type => ::Thrift::Types::STRING}, :optional => true}, PROCESSORIDENTIFIER => {:type => ::Thrift::Types::STRING, :name => 'processorIdentifier', :optional => true}, - ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', 
:optional => true}, + ENGINE => {:type => ::Thrift::Types::STRING, :name => 'engine', :default => %q"hive", :optional => true}, ID => {:type => ::Thrift::Types::I64, :name => 'id', :default => -1, :optional => true} } @@ -7801,6 +7798,7 @@ class GetPartitionsPsWithAuthRequest SKIPCOLUMNSCHEMAFORPARTITION = 10 INCLUDEPARAMKEYPATTERN = 11 EXCLUDEPARAMKEYPATTERN = 12 + PARTNAMES = 13 FIELDS = { CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}, @@ -7814,7 +7812,8 @@ class GetPartitionsPsWithAuthRequest ID => {:type => ::Thrift::Types::I64, :name => 'id', :default => -1, :optional => true}, SKIPCOLUMNSCHEMAFORPARTITION => {:type => ::Thrift::Types::BOOL, :name => 'skipColumnSchemaForPartition', :optional => true}, INCLUDEPARAMKEYPATTERN => {:type => ::Thrift::Types::STRING, :name => 'includeParamKeyPattern', :optional => true}, - EXCLUDEPARAMKEYPATTERN => {:type => ::Thrift::Types::STRING, :name => 'excludeParamKeyPattern', :optional => true} + EXCLUDEPARAMKEYPATTERN => {:type => ::Thrift::Types::STRING, :name => 'excludeParamKeyPattern', :optional => true}, + PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}, :optional => true} } def struct_fields; FIELDS; end diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java index 695a3282838c..115942b9b8f9 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaHook.java @@ -21,6 +21,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.hive.metastore.api.CreateTableRequest; import org.apache.hadoop.hive.metastore.api.EnvironmentContext; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Table; @@ -76,6 +77,17 @@ public interface HiveMetaHook { void preCreateTable(Table table) throws MetaException; + /** + * Called before a new table definition is added to the metastore + * during CREATE TABLE. + * + * @param request the whole request to create a new table + */ + default void preCreateTable(CreateTableRequest request) + throws MetaException { + preCreateTable(request.getTable()); + } + /** * Called after failure adding a new table definition to the metastore * during CREATE TABLE. diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 9b7cbb82df67..862096cb4d8e 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -1272,7 +1272,8 @@ public Map> getPartitionColumnStatistics( List colNames, String engine, String validWriteIdList) throws NoSuchObjectException, MetaException, TException { PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName, tableName, colNames, - partNames == null ? new ArrayList() : partNames, engine); + partNames == null ? 
new ArrayList() : partNames); + rqst.setEngine(engine); rqst.setCatName(catName); rqst.setValidWriteIdList(validWriteIdList); return client.get_partitions_statistics_req(rqst).getPartStats(); @@ -1297,7 +1298,8 @@ public AggrStats getAggrColStatsFor(String catName, String dbName, String tblNam LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); return new AggrStats(new ArrayList<>(), 0); // Nothing to aggregate } - PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames, engine); + PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames); + req.setEngine(engine); req.setCatName(catName); req.setValidWriteIdList(writeIdList); @@ -1479,7 +1481,7 @@ public void createTable(CreateTableRequest request) throws HiveMetaHook hook = getHook(tbl); if (hook != null) { - hook.preCreateTable(tbl); + hook.preCreateTable(request); } boolean success = false; try { @@ -3446,7 +3448,8 @@ public List getTableColumnStatistics(String catName, String if (colNames.isEmpty()) { return Collections.emptyList(); } - TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames, engine); + TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames); + rqst.setEngine(engine); rqst.setCatName(catName); rqst.setEngine(engine); return getTableColumnStatisticsInternal(rqst).getTableStats(); @@ -3479,7 +3482,7 @@ public List getTableColumnStatistics(String catName, String if (colNames.isEmpty()) { return Collections.emptyList(); } - TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames, engine); + TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames); rqst.setEngine(engine); rqst.setCatName(catName); rqst.setValidWriteIdList(validWriteIdList); @@ -3505,8 +3508,8 @@ public Map> getPartitionColumnStatistics( public Map> getPartitionColumnStatistics( String catName, String dbName, String tableName, List partNames, List colNames, String engine) throws TException { - PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName, tableName, colNames, - partNames, engine); + PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName, tableName, colNames, partNames); + rqst.setEngine(engine); rqst.setCatName(catName); rqst.setValidWriteIdList(getValidWriteIdList(dbName, tableName)); return client.get_partitions_statistics_req(rqst).getPartStats(); @@ -4659,7 +4662,8 @@ public AggrStats getAggrColStatsFor(String catName, String dbName, String tblNam LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); return new AggrStats(new ArrayList<>(), 0); // Nothing to aggregate } - PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames, engine); + PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames); + req.setEngine(engine); req.setCatName(catName); req.setValidWriteIdList(getValidWriteIdList(dbName, tblName)); diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index 7afce57987f0..5a0c8a4ba0f5 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -94,14 +94,17 
@@ public class MetastoreConf { static final String METASTORE_DELEGATION_MANAGER_CLASS = "org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager"; @VisibleForTesting - static final String ACID_HOUSE_KEEPER_SERVICE_CLASS = - "org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService"; + static final String ACID_HOUSEKEEPER_SERVICE_CLASS = + "org.apache.hadoop.hive.metastore.txn.service.AcidHouseKeeperService"; + @VisibleForTesting + static final String COMPACTION_HOUSEKEEPER_SERVICE_CLASS = + "org.apache.hadoop.hive.metastore.txn.service.CompactionHouseKeeperService"; @VisibleForTesting static final String ACID_TXN_CLEANER_SERVICE_CLASS = - "org.apache.hadoop.hive.metastore.txn.AcidTxnCleanerService"; + "org.apache.hadoop.hive.metastore.txn.service.AcidTxnCleanerService"; @VisibleForTesting static final String ACID_OPEN_TXNS_COUNTER_SERVICE_CLASS = - "org.apache.hadoop.hive.metastore.txn.AcidOpenTxnsCounterService"; + "org.apache.hadoop.hive.metastore.txn.service.AcidOpenTxnsCounterService"; public static final String METASTORE_AUTHENTICATION_LDAP_USERMEMBERSHIPKEY_NAME = "metastore.authentication.ldap.userMembershipKey"; @@ -293,6 +296,9 @@ public enum ConfVars { ACID_HOUSEKEEPER_SERVICE_INTERVAL("metastore.acid.housekeeper.interval", "hive.metastore.acid.housekeeper.interval", 60, TimeUnit.SECONDS, "Time interval describing how often the acid housekeeper runs."), + COMPACTION_HOUSEKEEPER_SERVICE_INTERVAL("metastore.compaction.housekeeper.interval", + "hive.metastore.compaction.housekeeper.interval", 300, TimeUnit.SECONDS, + "Time interval describing how often the acid compaction housekeeper runs."), ACID_TXN_CLEANER_INTERVAL("metastore.acid.txn.cleaner.interval", "hive.metastore.acid.txn.cleaner.interval", 10, TimeUnit.SECONDS, "Time interval describing how often aborted and committed txns are cleaned."), @@ -353,9 +359,10 @@ public enum ConfVars { + "To enable auto create also set hive.metastore.schema.verification=false. Auto creation is not " + "recommended for production use cases, run schematool command instead." ), BATCH_RETRIEVE_MAX("metastore.batch.retrieve.max", "hive.metastore.batch.retrieve.max", 300, + new RangeValidator(1, null), "Maximum number of objects (tables/partitions) can be retrieved from metastore in one batch. \n" + "The higher the number, the less the number of round trips is needed to the Hive metastore server, \n" + - "but it may also cause higher memory requirement at the client side."), + "but it may also cause higher memory requirement at the client side. 
Batch value should be greater than 0"), BATCH_RETRIEVE_OBJECTS_MAX("metastore.batch.retrieve.table.partition.max", "hive.metastore.batch.retrieve.table.partition.max", 1000, "Maximum number of objects that metastore internally retrieves in one batch."), @@ -579,7 +586,7 @@ public enum ConfVars { "metastore.housekeeping.leader.election", "host", new StringSetValidator("host", "lock"), "Set to host, HMS will choose the leader by the configured metastore.housekeeping.leader.hostname.\n" + - "Set to lock, HMS will use the hive lock to elect the leader."), + "Set to lock, HMS will use the Hive lock to elect the leader."), METASTORE_HOUSEKEEPING_LEADER_AUDITTABLE("metastore.housekeeping.leader.auditTable", "metastore.housekeeping.leader.auditTable", "", "Audit the leader election event to a plain json table when configured."), @@ -592,6 +599,9 @@ public enum ConfVars { "metastore.housekeeping.leader.auditFiles.limit", 10, "Limit the number of small audit files when metastore.housekeeping.leader.newAuditFile is true.\n" + "If the number of audit files exceeds the limit, then the oldest will be deleted."), + METASTORE_HOUSEKEEPING_LEADER_LOCK_NAMESPACE("metastore.housekeeping.leader.lock.namespace", + "metastore.housekeeping.leader.lock.namespace", "", + "The database where the Hive lock sits when metastore.housekeeping.leader.election is set to lock."), METASTORE_HOUSEKEEPING_THREADS_ON("metastore.housekeeping.threads.on", "hive.metastore.housekeeping.threads.on", false, "Whether to run the tasks under metastore.task.threads.remote on this metastore instance or not.\n" + @@ -1469,7 +1479,8 @@ public enum ConfVars { "always be started, regardless of whether the metastore is running in embedded mode " + "or in server mode. They must implement " + METASTORE_TASK_THREAD_CLASS), TASK_THREADS_REMOTE_ONLY("metastore.task.threads.remote", "metastore.task.threads.remote", - ACID_HOUSE_KEEPER_SERVICE_CLASS + "," + + ACID_HOUSEKEEPER_SERVICE_CLASS + "," + + COMPACTION_HOUSEKEEPER_SERVICE_CLASS + "," + ACID_TXN_CLEANER_SERVICE_CLASS + "," + ACID_OPEN_TXNS_COUNTER_SERVICE_CLASS + "," + MATERIALZIATIONS_REBUILD_LOCK_CLEANER_TASK_CLASS + "," + @@ -1633,7 +1644,7 @@ public enum ConfVars { "Time before an open transaction operation should persist, otherwise it is considered invalid and rolled back"), TXN_USE_MIN_HISTORY_LEVEL("metastore.txn.use.minhistorylevel", "hive.txn.use.minhistorylevel", true, "Set this to false, for the TxnHandler and Cleaner to not use MIN_HISTORY_LEVEL table and take advantage of openTxn optimisation.\n" - + "If the table is dropped HMS will switch this flag to false."), + + "If the table is dropped HMS will switch this flag to false, any other value changes need a restart to take effect."), TXN_USE_MIN_HISTORY_WRITE_ID("metastore.txn.use.minhistorywriteid", "hive.txn.use.minhistorywriteid", false, "Set this to true, to avoid global minOpenTxn check in Cleaner.\n" + "If the table is dropped HMS will switch this flag to false."), diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnQueries.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnQueries.java deleted file mode 100644 index f3f0e5d939b9..000000000000 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnQueries.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.metastore.txn; - -public class TxnQueries { - public static final String SHOW_COMPACTION_ORDERBY_CLAUSE = - " ORDER BY CASE " + - " WHEN \"CC_END\" > \"CC_START\" and \"CC_END\" > \"CC_COMMIT_TIME\" " + - " THEN \"CC_END\" " + - " WHEN \"CC_START\" > \"CC_COMMIT_TIME\" " + - " THEN \"CC_START\" " + - " ELSE \"CC_COMMIT_TIME\" " + - " END desc ," + - " \"CC_ENQUEUE_TIME\" asc"; - - public static final String SHOW_COMPACTION_QUERY = - "SELECT XX.* FROM ( SELECT " + - " \"CQ_DATABASE\" AS \"CC_DATABASE\", \"CQ_TABLE\" AS \"CC_TABLE\", \"CQ_PARTITION\" AS \"CC_PARTITION\", " + - " \"CQ_STATE\" AS \"CC_STATE\", \"CQ_TYPE\" AS \"CC_TYPE\", \"CQ_WORKER_ID\" AS \"CC_WORKER_ID\", " + - " \"CQ_START\" AS \"CC_START\", -1 \"CC_END\", \"CQ_RUN_AS\" AS \"CC_RUN_AS\", " + - " \"CQ_HADOOP_JOB_ID\" AS \"CC_HADOOP_JOB_ID\", \"CQ_ID\" AS \"CC_ID\", \"CQ_ERROR_MESSAGE\" AS \"CC_ERROR_MESSAGE\", " + - " \"CQ_ENQUEUE_TIME\" AS \"CC_ENQUEUE_TIME\", \"CQ_WORKER_VERSION\" AS \"CC_WORKER_VERSION\", " + - " \"CQ_INITIATOR_ID\" AS \"CC_INITIATOR_ID\", \"CQ_INITIATOR_VERSION\" AS \"CC_INITIATOR_VERSION\", " + - " \"CQ_CLEANER_START\" AS \"CC_CLEANER_START\", \"CQ_POOL_NAME\" AS \"CC_POOL_NAME\", \"CQ_TXN_ID\" AS \"CC_TXN_ID\", " + - " \"CQ_NEXT_TXN_ID\" AS \"CC_NEXT_TXN_ID\", \"CQ_COMMIT_TIME\" AS \"CC_COMMIT_TIME\", " + - " \"CQ_HIGHEST_WRITE_ID\" AS \"CC_HIGHEST_WRITE_ID\" " + - "FROM " + - " \"COMPACTION_QUEUE\" " + - "UNION ALL " + - "SELECT " + - " \"CC_DATABASE\" , \"CC_TABLE\", \"CC_PARTITION\", \"CC_STATE\", \"CC_TYPE\", \"CC_WORKER_ID\", " + - " \"CC_START\", \"CC_END\", \"CC_RUN_AS\", \"CC_HADOOP_JOB_ID\", \"CC_ID\", \"CC_ERROR_MESSAGE\", " + - " \"CC_ENQUEUE_TIME\", \"CC_WORKER_VERSION\", \"CC_INITIATOR_ID\", \"CC_INITIATOR_VERSION\", " + - " -1 , \"CC_POOL_NAME\", \"CC_TXN_ID\", \"CC_NEXT_TXN_ID\", \"CC_COMMIT_TIME\", " + - " \"CC_HIGHEST_WRITE_ID\"" + - "FROM " + - " \"COMPLETED_COMPACTIONS\" ) XX "; - - - public static final String SELECT_COMPACTION_QUEUE_BY_COMPID = - "SELECT XX.* FROM ( SELECT " + - " \"CQ_ID\" AS \"CC_ID\", \"CQ_DATABASE\" AS \"CC_DATABASE\", \"CQ_TABLE\" AS \"CC_TABLE\", \"CQ_PARTITION\" AS \"CC_PARTITION\", " + - " \"CQ_STATE\" AS \"CC_STATE\", \"CQ_TYPE\" AS \"CC_TYPE\", \"CQ_TBLPROPERTIES\" AS \"CC_TBLPROPERTIES\", \"CQ_WORKER_ID\" AS \"CC_WORKER_ID\", " + - " \"CQ_START\" AS \"CC_START\", \"CQ_RUN_AS\" AS \"CC_RUN_AS\", \"CQ_HIGHEST_WRITE_ID\" AS \"CC_HIGHEST_WRITE_ID\", \"CQ_META_INFO\" AS \"CC_META_INFO\"," + - " \"CQ_HADOOP_JOB_ID\" AS \"CC_HADOOP_JOB_ID\", \"CQ_ERROR_MESSAGE\" AS \"CC_ERROR_MESSAGE\", \"CQ_ENQUEUE_TIME\" AS \"CC_ENQUEUE_TIME\"," + - " \"CQ_WORKER_VERSION\" AS \"CC_WORKER_VERSION\", \"CQ_INITIATOR_ID\" AS \"CC_INITIATOR_ID\", \"CQ_INITIATOR_VERSION\" AS \"CC_INITIATOR_VERSION\", " + - " \"CQ_RETRY_RETENTION\" AS \"CC_RETRY_RETENTION\", \"CQ_NEXT_TXN_ID\" AS \"CC_NEXT_TXN_ID\", \"CQ_TXN_ID\" AS \"CC_TXN_ID\", " + - " \"CQ_COMMIT_TIME\" AS \"CC_COMMIT_TIME\", \"CQ_POOL_NAME\" AS \"CC_POOL_NAME\", " + - " \"CQ_NUMBER_OF_BUCKETS\" AS \"CC_NUMBER_OF_BUCKETS\", \"CQ_ORDER_BY\" AS \"CC_ORDER_BY\" " + - " FROM " + - " \"COMPACTION_QUEUE\" " + - " UNION ALL " + - " SELECT " + - " \"CC_ID\", \"CC_DATABASE\", 
\"CC_TABLE\", \"CC_PARTITION\", \"CC_STATE\", \"CC_TYPE\", " + - " \"CC_TBLPROPERTIES\", \"CC_WORKER_ID\", \"CC_START\", \"CC_RUN_AS\", " + - " \"CC_HIGHEST_WRITE_ID\", \"CC_META_INFO\", \"CC_HADOOP_JOB_ID\", \"CC_ERROR_MESSAGE\", " + - " \"CC_ENQUEUE_TIME\", \"CC_WORKER_VERSION\", \"CC_INITIATOR_ID\", \"CC_INITIATOR_VERSION\", " + - " -1 , \"CC_NEXT_TXN_ID\", \"CC_TXN_ID\", \"CC_NEXT_TXN_ID\", \"CC_POOL_NAME\", " + - " \"CC_NUMBER_OF_BUCKETS\", \"CC_ORDER_BY\" " + - " FROM " + - " \"COMPLETED_COMPACTIONS\") XX "; - - - public static final String INSERT_INTO_COMPLETED_COMPACTION = - "INSERT INTO \"COMPLETED_COMPACTIONS\" " + - " (\"CC_ID\", \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_STATE\", \"CC_TYPE\", " + - " \"CC_TBLPROPERTIES\", \"CC_WORKER_ID\", \"CC_START\", \"CC_END\", \"CC_RUN_AS\", " + - " \"CC_HIGHEST_WRITE_ID\", \"CC_META_INFO\", \"CC_HADOOP_JOB_ID\", \"CC_ERROR_MESSAGE\", " + - " \"CC_ENQUEUE_TIME\", \"CC_WORKER_VERSION\", \"CC_INITIATOR_ID\", \"CC_INITIATOR_VERSION\"," + - " \"CC_NEXT_TXN_ID\", \"CC_TXN_ID\", \"CC_COMMIT_TIME\", \"CC_POOL_NAME\", \"CC_NUMBER_OF_BUCKETS\", " + - " \"CC_ORDER_BY\") " + - " VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; -} diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java index 1aa98d9f1d18..c12c009b81cd 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreUtils.java @@ -107,6 +107,16 @@ public static Date convertStringToDate(String date) { return java.sql.Date.valueOf(val); } + /** + * Converts the string format date without a time-zone to + * a time-zone based string format date + * @param date the date without a time-zone + * @return time-zone based string format date + */ + public static String normalizeDate(String date) { + return convertDateToString(convertStringToDate(date)); + } + /** * Converts java.sql.Timestamp to string format timestamp. * @param timestamp java.sql.Timestamp object. diff --git a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift index e5869736403f..cbe1d5c96b38 100644 --- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift +++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift @@ -600,7 +600,7 @@ struct ColumnStatistics { 2: required list statsObj, 3: optional bool isStatsCompliant, // Are the stats isolation-level-compliant with the // the calling query? 
-4: optional string engine +4: optional string engine = "hive" } // FileMetadata represents the table-level (in case of unpartitioned) or partition-level @@ -725,7 +725,7 @@ struct SetPartitionsStatsRequest { 2: optional bool needMerge, //stats need to be merged with the existing stats 3: optional i64 writeId=-1, // writeId for the current query that updates the stats 4: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent -5: required string engine //engine creating the current request +5: optional string engine = "hive" //engine creating the current request } struct SetPartitionsStatsResponse { @@ -901,7 +901,7 @@ struct TableStatsRequest { 3: required list colNames 4: optional string catName, 5: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent - 6: required string engine, //engine creating the current request + 6: optional string engine = "hive", //engine creating the current request 7: optional i64 id=-1 // table id } @@ -912,7 +912,7 @@ struct PartitionsStatsRequest { 4: required list partNames, 5: optional string catName, 6: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent - 7: required string engine //engine creating the current request + 7: optional string engine = "hive" //engine creating the current request } // Return type for add_partitions_req @@ -993,7 +993,7 @@ struct GetPartitionsByNamesRequest { 4: optional bool get_col_stats, 5: optional list processorCapabilities, 6: optional string processorIdentifier, - 7: optional string engine, + 7: optional string engine = "hive", 8: optional string validWriteIdList, 9: optional bool getFileMetadata, 10: optional i64 id=-1, // table id @@ -1710,7 +1710,7 @@ struct GetTableRequest { 7: optional bool getColumnStats, 8: optional list processorCapabilities, 9: optional string processorIdentifier, - 10: optional string engine, + 10: optional string engine = "hive", 11: optional i64 id=-1 // table id } @@ -2360,7 +2360,8 @@ struct GetPartitionsPsWithAuthRequest { 9: optional i64 id=-1 // table id 10: optional bool skipColumnSchemaForPartition, 11: optional string includeParamKeyPattern, - 12: optional string excludeParamKeyPattern + 12: optional string excludeParamKeyPattern, + 13: optional list partNames; } struct GetPartitionsPsWithAuthResponse { diff --git a/standalone-metastore/metastore-server/pom.xml b/standalone-metastore/metastore-server/pom.xml index 2268c71031cb..8367ad533d5b 100644 --- a/standalone-metastore/metastore-server/pom.xml +++ b/standalone-metastore/metastore-server/pom.xml @@ -16,7 +16,7 @@ hive-standalone-metastore org.apache.hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT 4.0.0 hive-standalone-metastore-server @@ -28,12 +28,12 @@ org.apache.hive hive-standalone-metastore-common - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT org.apache.hive hive-metastore - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT test diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Batchable.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Batchable.java index e3fd5a4bf14a..571d6bdbd1dc 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Batchable.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Batchable.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hive.metastore; import java.util.ArrayList; +import 
java.util.Collections; import java.util.List; import javax.jdo.Query; @@ -38,7 +39,7 @@ public abstract class Batchable { public static final int NO_BATCHING = -1; private List queries = null; - public abstract List run(List input) throws MetaException; + public abstract List run(List input) throws Exception; public void addQueryAfterUse(Query query) { if (queries == null) { @@ -70,18 +71,25 @@ public static List runBatched( final int batchSize, List input, Batchable runnable) throws MetaException { - if (batchSize == NO_BATCHING || batchSize >= input.size()) { - return runnable.run(input); + if (input == null || input.isEmpty()) { + return Collections.emptyList(); } - List result = new ArrayList(input.size()); - for (int fromIndex = 0, toIndex = 0; toIndex < input.size(); fromIndex = toIndex) { - toIndex = Math.min(fromIndex + batchSize, input.size()); - List batchedInput = input.subList(fromIndex, toIndex); - List batchedOutput = runnable.run(batchedInput); - if (batchedOutput != null) { - result.addAll(batchedOutput); + try { + if (batchSize == NO_BATCHING || batchSize >= input.size()) { + return runnable.run(input); } + List result = new ArrayList<>(input.size()); + for (int fromIndex = 0, toIndex = 0; toIndex < input.size(); fromIndex = toIndex) { + toIndex = Math.min(fromIndex + batchSize, input.size()); + List batchedInput = input.subList(fromIndex, toIndex); + List batchedOutput = runnable.run(batchedInput); + if (batchedOutput != null) { + result.addAll(batchedOutput); + } + } + return result; + } catch (Exception e) { + throw ExceptionHandler.newMetaException(e); } - return result; } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java index afee07974204..865365722fc8 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DatabaseProduct.java @@ -27,6 +27,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.locks.ReentrantLock; import java.util.stream.Stream; import org.apache.commons.lang3.exception.ExceptionUtils; @@ -35,6 +36,7 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.util.ReflectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -50,13 +52,19 @@ * */ public class DatabaseProduct implements Configurable { static final private Logger LOG = LoggerFactory.getLogger(DatabaseProduct.class.getName()); - private static final Class[] unrecoverableSqlExceptions = new Class[]{ - // TODO: collect more unrecoverable SQLExceptions - SQLIntegrityConstraintViolationException.class + private static final Class[] unrecoverableExceptions = new Class[]{ + // TODO: collect more unrecoverable Exceptions + SQLIntegrityConstraintViolationException.class, + DeadlineException.class }; + /** + * Derby specific concurrency control + */ + private static final ReentrantLock derbyLock = new ReentrantLock(true); + public enum DbType {DERBY, MYSQL, POSTGRES, ORACLE, SQLSERVER, CUSTOM, UNDEFINED}; - public DbType dbType; + static public DbType dbType; // Singleton instance private static 
DatabaseProduct theDatabaseProduct; @@ -162,7 +170,7 @@ private static DbType getDbType(String productName) { } public static boolean isRecoverableException(Throwable t) { - return Stream.of(unrecoverableSqlExceptions) + return Stream.of(unrecoverableExceptions) .allMatch(ex -> ExceptionUtils.indexOfType(t, ex) < 0); } @@ -205,10 +213,11 @@ public boolean isDeadlock(SQLException e) { /** * Is the given exception a table not found exception - * @param e Exception + * @param t Exception * @return */ - public boolean isTableNotExistsError(SQLException e) { + public boolean isTableNotExistsError(Throwable t) { + SQLException e = TxnUtils.getSqlException(t); return (isPOSTGRES() && "42P01".equalsIgnoreCase(e.getSQLState())) || (isMYSQL() && "42S02".equalsIgnoreCase(e.getSQLState())) || (isORACLE() && "42000".equalsIgnoreCase(e.getSQLState()) && e.getMessage().contains("ORA-00942")) @@ -261,7 +270,9 @@ protected String toDate(String tableValue) { protected String toTimestamp(String tableValue) { if (isORACLE()) { - return "TO_TIMESTAMP(" + tableValue + ", 'YYYY-MM-DD HH:mm:ss')"; + return "TO_TIMESTAMP(" + tableValue + ", 'YYYY-MM-DD HH24:mi:ss')"; + } else if (isSQLSERVER()) { + return "CONVERT(DATETIME, " + tableValue + ")"; } else { return "cast(" + tableValue + " as TIMESTAMP)"; } @@ -558,41 +569,42 @@ public boolean supportsGetGeneratedKeys() throws MetaException { } } - public boolean isDuplicateKeyError(SQLException ex) { + public boolean isDuplicateKeyError(Throwable t) { + SQLException sqlEx = TxnUtils.getSqlException(t); switch (dbType) { case DERBY: case CUSTOM: // ANSI SQL - if("23505".equals(ex.getSQLState())) { + if("23505".equals(sqlEx.getSQLState())) { return true; } break; case MYSQL: //https://dev.mysql.com/doc/refman/5.5/en/error-messages-server.html - if((ex.getErrorCode() == 1022 || ex.getErrorCode() == 1062 || ex.getErrorCode() == 1586) - && "23000".equals(ex.getSQLState())) { + if((sqlEx.getErrorCode() == 1022 || sqlEx.getErrorCode() == 1062 || sqlEx.getErrorCode() == 1586) + && "23000".equals(sqlEx.getSQLState())) { return true; } break; case SQLSERVER: //2627 is unique constaint violation incl PK, 2601 - unique key - if ((ex.getErrorCode() == 2627 || ex.getErrorCode() == 2601) && "23000".equals(ex.getSQLState())) { + if ((sqlEx.getErrorCode() == 2627 || sqlEx.getErrorCode() == 2601) && "23000".equals(sqlEx.getSQLState())) { return true; } break; case ORACLE: - if(ex.getErrorCode() == 1 && "23000".equals(ex.getSQLState())) { + if(sqlEx.getErrorCode() == 1 && "23000".equals(sqlEx.getSQLState())) { return true; } break; case POSTGRES: //http://www.postgresql.org/docs/8.1/static/errcodes-appendix.html - if("23505".equals(ex.getSQLState())) { + if("23505".equals(sqlEx.getSQLState())) { return true; } break; default: - String msg = ex.getMessage() + - " (SQLState=" + ex.getSQLState() + ", ErrorCode=" + ex.getErrorCode() + ")"; + String msg = sqlEx.getMessage() + + " (SQLState=" + sqlEx.getSQLState() + ", ErrorCode=" + sqlEx.getErrorCode() + ")"; throw new IllegalArgumentException("Unexpected DB type: " + dbType + "; " + msg); } return false; @@ -745,6 +757,21 @@ public Object getBoolean(boolean val) { return val; } + /** + * Get the max rows in a query with paramSize. + * @param batch the configured batch size + * @param paramSize the parameter size in a query statement + * @return the max allowed rows in a query + */ + public int getMaxRows(int batch, int paramSize) { + if (isSQLSERVER()) { + // SQL Server supports a maximum of 2100 parameters in a request. 
Adjust the maxRowsInBatch accordingly + int maxAllowedRows = (2100 - paramSize) / paramSize; + return Math.min(batch, maxAllowedRows); + } + return batch; + } + // This class implements the Configurable interface for the benefit // of "plugin" instances created via reflection (see invocation of // ReflectionUtils.newInstance in method determineDatabaseProduct) @@ -757,4 +784,21 @@ public Configuration getConf() { public void setConf(Configuration c) { myConf = c; } + + /** + * lockInternal() and {@link #unlockInternal()} are used to serialize those operations that require + * Select ... For Update to sequence operations properly. In practice that means when running + * with Derby database. See more notes at class level. + */ + public void lockInternal() { + if (isDERBY()) { + derbyLock.lock(); + } + } + + public void unlockInternal() { + if (isDERBY()) { + derbyLock.unlock(); + } + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DirectSqlInsertPart.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DirectSqlInsertPart.java index be17470edd66..ba205ebe705b 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DirectSqlInsertPart.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DirectSqlInsertPart.java @@ -83,13 +83,7 @@ private void insertInBatch(String tableName, String columns, int columnCount, in return; } int maxRowsInBatch = batchSize > 0 ? batchSize : rowCount; - if (dbType.isSQLSERVER()) { - // SQL Server supports a maximum of 2100 parameters in a request. Adjust the maxRowsInBatch accordingly - int maxAllowedRows = (2100 - columnCount) / columnCount; - if (maxRowsInBatch > maxAllowedRows) { - maxRowsInBatch = maxAllowedRows; - } - } + maxRowsInBatch = dbType.getMaxRows(maxRowsInBatch, columnCount); int maxBatches = rowCount / maxRowsInBatch; int last = rowCount % maxRowsInBatch; String rowFormat = "(" + repeat(",?", columnCount).substring(1) + ")"; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DirectSqlUpdatePart.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DirectSqlUpdatePart.java new file mode 100644 index 000000000000..441ce26ac6d7 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DirectSqlUpdatePart.java @@ -0,0 +1,1493 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.SkewedInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.events.UpdatePartitionColumnStatEvent; +import org.apache.hadoop.hive.metastore.events.UpdatePartitionColumnStatEventBatch; +import org.apache.hadoop.hive.metastore.messaging.EventMessage; +import org.apache.hadoop.hive.metastore.model.MColumnDescriptor; +import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics; +import org.apache.hadoop.hive.metastore.model.MStringList; +import org.apache.hadoop.hive.metastore.tools.SQLGenerator; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.datanucleus.ExecutionContext; +import org.datanucleus.api.jdo.JDOPersistenceManager; +import org.datanucleus.metadata.AbstractClassMetaData; +import org.datanucleus.metadata.IdentityType; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.apache.hadoop.conf.Configuration; + +import javax.jdo.PersistenceManager; +import javax.jdo.datastore.JDOConnection; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; + +import static org.apache.hadoop.hive.common.StatsSetupConst.COLUMN_STATS_ACCURATE; +import static org.apache.hadoop.hive.metastore.HMSHandler.getPartValsFromName; +import static org.apache.hadoop.hive.metastore.MetastoreDirectSqlUtils.executeWithArray; +import static org.apache.hadoop.hive.metastore.MetastoreDirectSqlUtils.extractSqlInt; +import static org.apache.hadoop.hive.metastore.MetastoreDirectSqlUtils.extractSqlLong; + +/** + * This class contains the optimizations for MetaStore that rely on direct SQL access to + * the underlying database. It should use ANSI SQL and be compatible with common databases + * such as MySQL (note that MySQL doesn't use full ANSI mode by default), Postgres, etc. + * + * This class separates out the update part from MetaStoreDirectSql class. 
+ */ +class DirectSqlUpdatePart { + private static final Logger LOG = LoggerFactory.getLogger(DirectSqlUpdatePart.class.getName()); + + private final PersistenceManager pm; + private final Configuration conf; + private final DatabaseProduct dbType; + private final int maxBatchSize; + private final SQLGenerator sqlGenerator; + + public DirectSqlUpdatePart(PersistenceManager pm, Configuration conf, + DatabaseProduct dbType, int batchSize) { + this.pm = pm; + this.conf = conf; + this.dbType = dbType; + this.maxBatchSize = batchSize; + sqlGenerator = new SQLGenerator(dbType, conf); + } + + void rollbackDBConn(Connection dbConn) { + try { + if (dbConn != null && !dbConn.isClosed()) dbConn.rollback(); + } catch (SQLException e) { + LOG.warn("Failed to rollback db connection ", e); + } + } + + void closeDbConn(JDOConnection jdoConn) { + try { + if (jdoConn != null) { + jdoConn.close(); + } + } catch (Exception e) { + LOG.warn("Failed to close db connection", e); + } + } + + static String quoteString(String input) { + return "'" + input + "'"; + } + + private void populateInsertUpdateMap(Map statsPartInfoMap, + Map updateMap, + MapinsertMap, + Connection dbConn, Table tbl) throws SQLException, MetaException, NoSuchObjectException { + StringBuilder prefix = new StringBuilder(); + StringBuilder suffix = new StringBuilder(); + List queries = new ArrayList<>(); + Set selectedParts = new HashSet<>(); + + List partIdList = statsPartInfoMap.keySet().stream().map( + e -> e.partitionId).collect(Collectors.toList() + ); + + prefix.append("select \"PART_ID\", \"COLUMN_NAME\", \"ENGINE\" from \"PART_COL_STATS\" WHERE "); + TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, + partIdList, "\"PART_ID\"", true, false); + + try (Statement statement = dbConn.createStatement()) { + for (String query : queries) { + LOG.debug("Execute query: " + query); + try (ResultSet rs = statement.executeQuery(query)) { + while (rs.next()) { + selectedParts.add(new PartColNameInfo(rs.getLong(1), rs.getString(2), rs.getString(3))); + } + } + } + } + + for (Map.Entry entry : statsPartInfoMap.entrySet()) { + PartitionInfo partitionInfo = (PartitionInfo) entry.getKey(); + ColumnStatistics colStats = (ColumnStatistics) entry.getValue(); + long partId = partitionInfo.partitionId; + ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); + if (!statsDesc.isSetCatName()) { + statsDesc.setCatName(tbl.getCatName()); + } + for (ColumnStatisticsObj statisticsObj : colStats.getStatsObj()) { + PartColNameInfo temp = new PartColNameInfo(partId, statisticsObj.getColName(), + colStats.getEngine()); + if (selectedParts.contains(temp)) { + updateMap.put(temp, StatObjectConverter. + convertToMPartitionColumnStatistics(null, statsDesc, statisticsObj, colStats.getEngine())); + } else { + insertMap.put(temp, StatObjectConverter. + convertToMPartitionColumnStatistics(null, statsDesc, statisticsObj, colStats.getEngine())); + } + } + } + } + + private void updatePartColStatTable(Map updateMap, + Connection dbConn) throws SQLException, MetaException, NoSuchObjectException { + Map>> updates = new HashMap<>(); + for (Map.Entry entry : updateMap.entrySet()) { + MPartitionColumnStatistics mPartitionColumnStatistics = entry.getValue(); + StringBuilder update = new StringBuilder("UPDATE \"PART_COL_STATS\" SET ") + .append(StatObjectConverter.getUpdatedColumnSql(mPartitionColumnStatistics)) + .append(" WHERE \"PART_ID\" = ? AND \"COLUMN_NAME\" = ? 
AND \"ENGINE\" = ?"); + updates.computeIfAbsent(update.toString(), k -> new ArrayList<>()).add(entry); + } + + for (Map.Entry>> entry : updates.entrySet()) { + List partIds = new ArrayList<>(); + try (PreparedStatement pst = dbConn.prepareStatement(entry.getKey())) { + List> entries = entry.getValue(); + for (Map.Entry partStats : entries) { + PartColNameInfo partColNameInfo = partStats.getKey(); + MPartitionColumnStatistics mPartitionColumnStatistics = partStats.getValue(); + int colIdx = StatObjectConverter.initUpdatedColumnStatement(mPartitionColumnStatistics, pst); + pst.setLong(colIdx++, partColNameInfo.partitionId); + pst.setString(colIdx++, mPartitionColumnStatistics.getColName()); + pst.setString(colIdx++, mPartitionColumnStatistics.getEngine()); + partIds.add(partColNameInfo.partitionId); + pst.addBatch(); + if (partIds.size() == maxBatchSize) { + LOG.debug("Execute updates on part: {}", partIds); + verifyUpdates(pst.executeBatch(), partIds); + partIds = new ArrayList<>(); + } + } + if (!partIds.isEmpty()) { + LOG.debug("Execute updates on part: {}", partIds); + verifyUpdates(pst.executeBatch(), partIds); + } + } + } + } + + private void verifyUpdates(int[] numUpdates, List partIds) throws MetaException { + for (int i = 0; i < numUpdates.length; i++) { + if (numUpdates[i] != 1) { + throw new MetaException("Invalid state of PART_COL_STATS for PART_ID " + partIds.get(i)); + } + } + } + + private void insertIntoPartColStatTable(Map insertMap, + long maxCsId, + Connection dbConn) throws SQLException, MetaException, NoSuchObjectException { + int numRows = 0; + String insert = "INSERT INTO \"PART_COL_STATS\" (\"CS_ID\", \"CAT_NAME\", \"DB_NAME\"," + + "\"TABLE_NAME\", \"PARTITION_NAME\", \"COLUMN_NAME\", \"COLUMN_TYPE\", \"PART_ID\"," + + " \"LONG_LOW_VALUE\", \"LONG_HIGH_VALUE\", \"DOUBLE_HIGH_VALUE\", \"DOUBLE_LOW_VALUE\"," + + " \"BIG_DECIMAL_LOW_VALUE\", \"BIG_DECIMAL_HIGH_VALUE\", \"NUM_NULLS\", \"NUM_DISTINCTS\", \"BIT_VECTOR\" ," + + " \"HISTOGRAM\", \"AVG_COL_LEN\", \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", \"LAST_ANALYZED\", \"ENGINE\") values " + + "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + + try (PreparedStatement preparedStatement = dbConn.prepareStatement(insert)) { + for (Map.Entry entry : insertMap.entrySet()) { + PartColNameInfo partColNameInfo = (PartColNameInfo) entry.getKey(); + Long partId = partColNameInfo.partitionId; + MPartitionColumnStatistics mPartitionColumnStatistics = (MPartitionColumnStatistics) entry.getValue(); + + preparedStatement.setLong(1, maxCsId); + preparedStatement.setString(2, mPartitionColumnStatistics.getCatName()); + preparedStatement.setString(3, mPartitionColumnStatistics.getDbName()); + preparedStatement.setString(4, mPartitionColumnStatistics.getTableName()); + preparedStatement.setString(5, mPartitionColumnStatistics.getPartitionName()); + preparedStatement.setString(6, mPartitionColumnStatistics.getColName()); + preparedStatement.setString(7, mPartitionColumnStatistics.getColType()); + preparedStatement.setLong(8, partId); + preparedStatement.setObject(9, mPartitionColumnStatistics.getLongLowValue()); + preparedStatement.setObject(10, mPartitionColumnStatistics.getLongHighValue()); + preparedStatement.setObject(11, mPartitionColumnStatistics.getDoubleHighValue()); + preparedStatement.setObject(12, mPartitionColumnStatistics.getDoubleLowValue()); + preparedStatement.setString(13, mPartitionColumnStatistics.getDecimalLowValue()); + preparedStatement.setString(14, 
mPartitionColumnStatistics.getDecimalHighValue()); + preparedStatement.setObject(15, mPartitionColumnStatistics.getNumNulls()); + preparedStatement.setObject(16, mPartitionColumnStatistics.getNumDVs()); + preparedStatement.setObject(17, mPartitionColumnStatistics.getBitVector()); + preparedStatement.setBytes(18, mPartitionColumnStatistics.getHistogram()); + preparedStatement.setObject(19, mPartitionColumnStatistics.getAvgColLen()); + preparedStatement.setObject(20, mPartitionColumnStatistics.getMaxColLen()); + preparedStatement.setObject(21, mPartitionColumnStatistics.getNumTrues()); + preparedStatement.setObject(22, mPartitionColumnStatistics.getNumFalses()); + preparedStatement.setLong(23, mPartitionColumnStatistics.getLastAnalyzed()); + preparedStatement.setString(24, mPartitionColumnStatistics.getEngine()); + + maxCsId++; + numRows++; + preparedStatement.addBatch(); + if (numRows == maxBatchSize) { + preparedStatement.executeBatch(); + numRows = 0; + } + } + + if (numRows != 0) { + preparedStatement.executeBatch(); + } + } + } + + private Map getParamValues(Connection dbConn, List partIdList) throws SQLException { + List queries = new ArrayList<>(); + StringBuilder prefix = new StringBuilder(); + StringBuilder suffix = new StringBuilder(); + + prefix.append("select \"PART_ID\", \"PARAM_VALUE\" " + + " from \"PARTITION_PARAMS\" where " + + " \"PARAM_KEY\" = 'COLUMN_STATS_ACCURATE' " + + " and "); + TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, + partIdList, "\"PART_ID\"", true, false); + + Map partIdToParaMap = new HashMap<>(); + try (Statement statement = dbConn.createStatement()) { + for (String query : queries) { + LOG.debug("Execute query: " + query); + try (ResultSet rs = statement.executeQuery(query)) { + while (rs.next()) { + partIdToParaMap.put(rs.getLong(1), rs.getString(2)); + } + } + } + } + + return partIdToParaMap; + } + + private void updateWriteIdForPartitions(Connection dbConn, long writeId, List partIdList) throws SQLException { + StringBuilder prefix = new StringBuilder(); + List queries = new ArrayList<>(); + StringBuilder suffix = new StringBuilder(); + + prefix.append("UPDATE \"PARTITIONS\" set \"WRITE_ID\" = " + writeId + " where "); + TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, + partIdList, "\"PART_ID\"", false, false); + + try (Statement statement = dbConn.createStatement()) { + for (String query : queries) { + LOG.debug("Execute update: " + query); + statement.executeUpdate(query); + } + } + } + + private Map> updatePartitionParamTable(Connection dbConn, + Map partitionInfoMap, + String validWriteIds, + long writeId, + boolean isAcidTable) + throws SQLException, MetaException { + Map> result = new HashMap<>(); + boolean areTxnStatsSupported = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_TXN_STATS_ENABLED); + String insert = "INSERT INTO \"PARTITION_PARAMS\" (\"PART_ID\", \"PARAM_KEY\", \"PARAM_VALUE\") " + + "VALUES( ? , 'COLUMN_STATS_ACCURATE' , ? )"; + String delete = "DELETE from \"PARTITION_PARAMS\" " + + " where \"PART_ID\" = ? " + + " and \"PARAM_KEY\" = 'COLUMN_STATS_ACCURATE'"; + String update = "UPDATE \"PARTITION_PARAMS\" set \"PARAM_VALUE\" = ? " + + " where \"PART_ID\" = ? " + + " and \"PARAM_KEY\" = 'COLUMN_STATS_ACCURATE'"; + int numInsert = 0; + int numDelete = 0; + int numUpdate = 0; + + List partIdList = partitionInfoMap.keySet().stream().map( + e -> e.partitionId).collect(Collectors.toList() + ); + + // get the old parameters from PARTITION_PARAMS table. 
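// Editor's note: illustrative sketch only, not part of this patch. It shows the
// batched IN-clause pattern that getParamValues() above delegates to
// TxnUtils.buildQueryWithINClause: a long PART_ID list is split into chunks so that
// no single query grows past database limits on IN-list size or statement length.
// The chunk size (1000) and the helper class name are assumptions of this sketch.
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

class InClauseBatchSketch {
  private static final int MAX_IN_LIST = 1000; // assumed per-query limit for the sketch

  /** Fetch COLUMN_STATS_ACCURATE values for many partitions using a few IN-clause queries. */
  static Map<Long, String> fetchStatsAccurate(Connection conn, List<Long> partIds) throws SQLException {
    Map<Long, String> result = new HashMap<>();
    for (int start = 0; start < partIds.size(); start += MAX_IN_LIST) {
      List<Long> chunk = partIds.subList(start, Math.min(start + MAX_IN_LIST, partIds.size()));
      String inList = chunk.stream().map(String::valueOf).collect(Collectors.joining(","));
      String sql = "select \"PART_ID\", \"PARAM_VALUE\" from \"PARTITION_PARAMS\""
          + " where \"PARAM_KEY\" = 'COLUMN_STATS_ACCURATE' and \"PART_ID\" in (" + inList + ")";
      try (Statement stmt = conn.createStatement(); ResultSet rs = stmt.executeQuery(sql)) {
        while (rs.next()) {
          result.put(rs.getLong(1), rs.getString(2)); // partition id -> serialized stats state
        }
      }
    }
    return result;
  }
}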
+ Map partIdToParaMap = getParamValues(dbConn, partIdList); + + try (PreparedStatement statementInsert = dbConn.prepareStatement(insert); + PreparedStatement statementDelete = dbConn.prepareStatement(delete); + PreparedStatement statementUpdate = dbConn.prepareStatement(update)) { + for (Map.Entry entry : partitionInfoMap.entrySet()) { + PartitionInfo partitionInfo = (PartitionInfo) entry.getKey(); + ColumnStatistics colStats = (ColumnStatistics) entry.getValue(); + List colNames = colStats.getStatsObj().stream().map(e -> e.getColName()).collect(Collectors.toList()); + long partWriteId = partitionInfo.writeId; + long partId = partitionInfo.partitionId; + Map newParameter; + + if (!partIdToParaMap.containsKey(partId)) { + newParameter = new HashMap<>(); + newParameter.put(COLUMN_STATS_ACCURATE, "TRUE"); + StatsSetupConst.setColumnStatsState(newParameter, colNames); + statementInsert.setLong(1, partId); + statementInsert.setString(2, newParameter.get(COLUMN_STATS_ACCURATE)); + numInsert++; + statementInsert.addBatch(); + if (numInsert == maxBatchSize) { + LOG.debug(" Executing insert " + insert); + statementInsert.executeBatch(); + numInsert = 0; + } + } else { + String oldStats = partIdToParaMap.get(partId); + + Map oldParameter = new HashMap<>(); + oldParameter.put(COLUMN_STATS_ACCURATE, oldStats); + + newParameter = new HashMap<>(); + newParameter.put(COLUMN_STATS_ACCURATE, oldStats); + StatsSetupConst.setColumnStatsState(newParameter, colNames); + + if (isAcidTable) { + String errorMsg = ObjectStore.verifyStatsChangeCtx( + colStats.getStatsDesc().getDbName() + "." + colStats.getStatsDesc().getTableName(), + oldParameter, newParameter, writeId, validWriteIds, true); + if (errorMsg != null) { + throw new MetaException(errorMsg); + } + } + + if (isAcidTable && + (!areTxnStatsSupported || !ObjectStore.isCurrentStatsValidForTheQuery(oldParameter, partWriteId, + validWriteIds, true))) { + statementDelete.setLong(1, partId); + statementDelete.addBatch(); + numDelete++; + if (numDelete == maxBatchSize) { + statementDelete.executeBatch(); + numDelete = 0; + LOG.debug("Removed COLUMN_STATS_ACCURATE from the parameters of the partition " + + colStats.getStatsDesc().getDbName() + "." + colStats.getStatsDesc().getTableName() + "." 
+ + colStats.getStatsDesc().getPartName()); + } + } else { + statementUpdate.setString(1, newParameter.get(COLUMN_STATS_ACCURATE)); + statementUpdate.setLong(2, partId); + statementUpdate.addBatch(); + numUpdate++; + if (numUpdate == maxBatchSize) { + LOG.debug(" Executing update " + statementUpdate); + statementUpdate.executeBatch(); + numUpdate = 0; + } + } + } + result.put(partitionInfo.partitionName, newParameter); + } + + if (numInsert != 0) { + statementInsert.executeBatch(); + } + + if (numUpdate != 0) { + statementUpdate.executeBatch(); + } + + if (numDelete != 0) { + statementDelete.executeBatch(); + } + + if (isAcidTable) { + updateWriteIdForPartitions(dbConn, writeId, partIdList); + } + return result; + } + } + + + private Map getPartitionInfo(Connection dbConn, long tblId, + Map partColStatsMap) + throws SQLException, MetaException { + List queries = new ArrayList<>(); + StringBuilder prefix = new StringBuilder(); + StringBuilder suffix = new StringBuilder(); + Map partitionInfoMap = new HashMap<>(); + + List partKeys = partColStatsMap.keySet().stream().map( + e -> quoteString(e)).collect(Collectors.toList() + ); + + prefix.append("select \"PART_ID\", \"WRITE_ID\", \"PART_NAME\" from \"PARTITIONS\" where "); + suffix.append(" and \"TBL_ID\" = " + tblId); + TxnUtils.buildQueryWithINClauseStrings(conf, queries, prefix, suffix, + partKeys, "\"PART_NAME\"", true, false); + + try (Statement statement = dbConn.createStatement()) { + for (String query : queries) { + // Select for update makes sure that the partitions are not modified while the stats are getting updated. + query = sqlGenerator.addForUpdateClause(query); + LOG.debug("Execute query: " + query); + try (ResultSet rs = statement.executeQuery(query)) { + while (rs.next()) { + PartitionInfo partitionInfo = new PartitionInfo(rs.getLong(1), + rs.getLong(2), rs.getString(3)); + partitionInfoMap.put(partitionInfo, partColStatsMap.get(rs.getString(3))); + } + } + } + } + return partitionInfoMap; + } + + private void setAnsiQuotes(Connection dbConn) throws SQLException { + if (sqlGenerator.getDbProduct().isMYSQL()) { + try (Statement stmt = dbConn.createStatement()) { + stmt.execute("SET @@session.sql_mode=ANSI_QUOTES"); + } + } + } + + /** + * Update the statistics for the given partitions. Add the notification logs also. + * @return map of partition key to column stats if successful, null otherwise. 
+ */ + public Map> updatePartitionColumnStatistics(Map partColStatsMap, + Table tbl, long csId, + String validWriteIds, long writeId, + List transactionalListeners) + throws MetaException { + JDOConnection jdoConn = null; + Connection dbConn = null; + boolean committed = false; + try { + dbType.lockInternal(); + jdoConn = pm.getDataStoreConnection(); + dbConn = (Connection) (jdoConn.getNativeConnection()); + + setAnsiQuotes(dbConn); + + Map partitionInfoMap = getPartitionInfo(dbConn, tbl.getId(), partColStatsMap); + + Map> result = + updatePartitionParamTable(dbConn, partitionInfoMap, validWriteIds, writeId, TxnUtils.isAcidTable(tbl)); + + Map insertMap = new HashMap<>(); + Map updateMap = new HashMap<>(); + populateInsertUpdateMap(partitionInfoMap, updateMap, insertMap, dbConn, tbl); + + LOG.info("Number of stats to insert " + insertMap.size() + " update " + updateMap.size()); + + if (insertMap.size() != 0) { + insertIntoPartColStatTable(insertMap, csId, dbConn); + } + + if (updateMap.size() != 0) { + updatePartColStatTable(updateMap, dbConn); + } + + if (transactionalListeners != null) { + UpdatePartitionColumnStatEventBatch eventBatch = new UpdatePartitionColumnStatEventBatch(null); + for (Map.Entry entry : result.entrySet()) { + Map parameters = (Map) entry.getValue(); + ColumnStatistics colStats = partColStatsMap.get(entry.getKey()); + List partVals = getPartValsFromName(tbl, colStats.getStatsDesc().getPartName()); + UpdatePartitionColumnStatEvent event = new UpdatePartitionColumnStatEvent(colStats, partVals, parameters, + tbl, writeId, null); + eventBatch.addPartColStatEvent(event); + } + MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners, + EventMessage.EventType.UPDATE_PARTITION_COLUMN_STAT_BATCH, eventBatch, dbConn, sqlGenerator); + } + dbConn.commit(); + committed = true; + return result; + } catch (Exception e) { + LOG.error("Unable to update Column stats for " + tbl.getTableName(), e); + throw new MetaException("Unable to update Column stats for " + tbl.getTableName() + + " due to: " + e.getMessage()); + } finally { + if (!committed) { + rollbackDBConn(dbConn); + } + closeDbConn(jdoConn); + dbType.unlockInternal(); + } + } + + /** + * Gets the next CS id from sequence MPartitionColumnStatistics and increment the CS id by numStats. + * @return The CD id before update. + */ + public long getNextCSIdForMPartitionColumnStatistics(long numStats) throws MetaException { + long maxCsId = 0; + boolean committed = false; + Connection dbConn = null; + JDOConnection jdoConn = null; + + try { + dbType.lockInternal(); + jdoConn = pm.getDataStoreConnection(); + dbConn = (Connection) (jdoConn.getNativeConnection()); + + setAnsiQuotes(dbConn); + + // This loop will be iterated at max twice. If there is no records, it will first insert and then do a select. + // We are not using any upsert operations as select for update and then update is required to make sure that + // the caller gets a reserved range for CSId not used by any other thread. 
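// Editor's note: illustrative sketch only, not part of this patch. It restates, with
// plain JDBC, the reservation loop that the comment above describes and the code
// below implements: read NEXT_VAL under SELECT ... FOR UPDATE, insert the sequence
// row if it does not exist yet, and bump NEXT_VAL past the reserved range before
// committing. The literal "FOR UPDATE" suffix and the simplified retry are
// assumptions of this sketch; the patch itself goes through
// SQLGenerator.addForUpdateClause and DatabaseProduct.isDuplicateKeyError.
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

class SequenceRangeSketch {
  private static final String SEQ =
      "'org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics'";

  /** Reserve numStats ids and return the first id of the reserved range. */
  static long reserveRange(Connection conn, long numStats) throws SQLException {
    conn.setAutoCommit(false);
    long first = 0;
    boolean insertDone = false;
    while (first == 0) {
      try (Statement stmt = conn.createStatement();
           ResultSet rs = stmt.executeQuery(
               "SELECT \"NEXT_VAL\" FROM \"SEQUENCE_TABLE\" WHERE \"SEQUENCE_NAME\" = " + SEQ
                   + " FOR UPDATE")) {
        if (rs.next()) {
          first = rs.getLong(1);               // row stays locked until commit
        } else if (insertDone) {
          throw new SQLException("Invalid state of SEQUENCE_TABLE: row missing after insert");
        } else {
          insertDone = true;
          try (Statement insertStmt = conn.createStatement()) {
            insertStmt.executeUpdate("INSERT INTO \"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\", \"NEXT_VAL\")"
                + " VALUES (" + SEQ + ", 1)");
          } catch (SQLException e) {
            // Most likely a duplicate-key race with another writer; re-select on the next pass.
          }
        }
      }
    }
    try (Statement stmt = conn.createStatement()) {
      stmt.executeUpdate("UPDATE \"SEQUENCE_TABLE\" SET \"NEXT_VAL\" = " + (first + numStats + 1)
          + " WHERE \"SEQUENCE_NAME\" = " + SEQ);
    }
    conn.commit();                             // releases the row lock
    return first;
  }
}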
+ boolean insertDone = false; + while (maxCsId == 0) { + String query = sqlGenerator.addForUpdateClause("SELECT \"NEXT_VAL\" FROM \"SEQUENCE_TABLE\" " + + "WHERE \"SEQUENCE_NAME\"= " + + quoteString("org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics")); + LOG.debug("Execute query: " + query); + try (Statement statement = dbConn.createStatement(); + ResultSet rs = statement.executeQuery(query)) { + if (rs.next()) { + maxCsId = rs.getLong(1); + } else if (insertDone) { + throw new MetaException("Invalid state of SEQUENCE_TABLE for MPartitionColumnStatistics"); + } else { + insertDone = true; + query = "INSERT INTO \"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\", \"NEXT_VAL\") VALUES ( " + + quoteString("org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics") + "," + 1 + + ")"; + try { + statement.executeUpdate(query); + } catch (SQLException e) { + // If the record is already inserted by some other thread continue to select. + if (dbType.isDuplicateKeyError(e)) { + continue; + } + LOG.error("Unable to insert into SEQUENCE_TABLE for MPartitionColumnStatistics.", e); + throw e; + } + } + } + } + + long nextMaxCsId = maxCsId + numStats + 1; + String query = "UPDATE \"SEQUENCE_TABLE\" SET \"NEXT_VAL\" = " + + nextMaxCsId + + " WHERE \"SEQUENCE_NAME\" = " + + quoteString("org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics"); + + try (Statement statement = dbConn.createStatement()) { + statement.executeUpdate(query); + } + dbConn.commit(); + committed = true; + return maxCsId; + } catch (Exception e) { + LOG.error("Unable to getNextCSIdForMPartitionColumnStatistics", e); + throw new MetaException("Unable to getNextCSIdForMPartitionColumnStatistics " + + " due to: " + e.getMessage()); + } finally { + if (!committed) { + rollbackDBConn(dbConn); + } + closeDbConn(jdoConn); + dbType.unlockInternal(); + } + } + + public void alterPartitions(Map, Long> partValuesToId, Map partIdToSdId, + List newParts) throws MetaException { + List partIds = new ArrayList<>(newParts.size()); + Map>> partParamsOpt = new HashMap<>(); + Map idToSd = new HashMap<>(); + for (Partition newPart : newParts) { + Long partId = partValuesToId.get(newPart.getValues()); + Long sdId = partIdToSdId.get(partId); + partIds.add(partId); + partParamsOpt.put(partId, Optional.ofNullable(newPart.getParameters())); + idToSd.put(sdId, newPart.getSd()); + } + + // alter partitions does not change partition values, + // so only PARTITIONS and PARTITION_PARAMS need to update. + updatePartitionsInBatch(partValuesToId, newParts); + updateParamTableInBatch("\"PARTITION_PARAMS\"", "\"PART_ID\"", partIds, partParamsOpt); + updateStorageDescriptorInBatch(idToSd); + } + + private interface ThrowableConsumer { + void accept(T t) throws SQLException, MetaException; + } + + private List filterIdsByNonNullValue(List ids, Map map) { + return ids.stream().filter(id -> map.get(id) != null).collect(Collectors.toList()); + } + + private void updateWithStatement(ThrowableConsumer consumer, String query) + throws MetaException { + JDOConnection jdoConn = pm.getDataStoreConnection(); + boolean doTrace = LOG.isDebugEnabled(); + long start = doTrace ? System.nanoTime() : 0; + try (PreparedStatement statement = + ((Connection) jdoConn.getNativeConnection()).prepareStatement(query)) { + consumer.accept(statement); + MetastoreDirectSqlUtils.timingTrace(doTrace, query, start, doTrace ? 
System.nanoTime() : 0); + } catch (SQLException e) { + LOG.error("Failed to execute update query: " + query, e); + throw new MetaException("Unable to execute update due to: " + e.getMessage()); + } finally { + closeDbConn(jdoConn); + } + } + + private void updatePartitionsInBatch(Map, Long> partValuesToId, + List newParts) throws MetaException { + List columns = Arrays.asList("\"CREATE_TIME\"", "\"LAST_ACCESS_TIME\"", "\"WRITE_ID\""); + List conditionKeys = Arrays.asList("\"PART_ID\""); + String stmt = TxnUtils.createUpdatePreparedStmt("\"PARTITIONS\"", columns, conditionKeys); + int maxRows = dbType.getMaxRows(maxBatchSize, 4); + updateWithStatement(statement -> Batchable.runBatched(maxRows, newParts, new Batchable() { + @Override + public List run(List input) throws SQLException { + for (Partition p : input) { + statement.setLong(1, p.getCreateTime()); + statement.setLong(2, p.getLastAccessTime()); + statement.setLong(3, p.getWriteId()); + statement.setLong(4, partValuesToId.get(p.getValues())); + statement.addBatch(); + } + statement.executeBatch(); + return null; + } + }), stmt); + } + + /* Get stringListId from both SKEWED_VALUES and SKEWED_COL_VALUE_LOC_MAP tables. */ + private List getStringListId(List sdIds) throws MetaException { + return Batchable.runBatched(maxBatchSize, sdIds, new Batchable() { + @Override + public List run(List input) throws Exception { + List result = new ArrayList<>(); + String idLists = MetaStoreDirectSql.getIdListForIn(input); + String queryFromSkewedValues = "select \"STRING_LIST_ID_EID\" " + + "from \"SKEWED_VALUES\" where \"SD_ID_OID\" in (" + idLists + ")"; + try (QueryWrapper query = + new QueryWrapper(pm.newQuery("javax.jdo.query.SQL", queryFromSkewedValues))) { + List sqlResult = executeWithArray(query.getInnerQuery(), null, queryFromSkewedValues); + result.addAll(sqlResult); + } + String queryFromValueLoc = "select \"STRING_LIST_ID_KID\" " + + "from \"SKEWED_COL_VALUE_LOC_MAP\" where \"SD_ID\" in (" + idLists + ")"; + try (QueryWrapper query = + new QueryWrapper(pm.newQuery("javax.jdo.query.SQL", queryFromValueLoc))) { + List sqlResult = executeWithArray(query.getInnerQuery(), null, queryFromValueLoc); + result.addAll(sqlResult); + } + return result; + } + }); + } + + private void updateParamTableInBatch(String paramTable, String idColumn, List ids, + Map>> newParamsOpt) throws MetaException { + Map> oldParams = getParams(paramTable, idColumn, ids); + + List> paramsToDelete = new ArrayList<>(); + List>> paramsToUpdate = new ArrayList<>(); + List>> paramsToAdd = new ArrayList<>(); + + for (Long id : ids) { + Map oldParam = oldParams.getOrDefault(id, new HashMap<>()); + Map newParam = newParamsOpt.get(id).orElseGet(HashMap::new); + for (Map.Entry entry : oldParam.entrySet()) { + String key = entry.getKey(); + String oldValue = entry.getValue(); + if (!newParam.containsKey(key)) { + paramsToDelete.add(Pair.of(id, key)); + } else if (!oldValue.equals(newParam.get(key))) { + paramsToUpdate.add(Pair.of(id, Pair.of(key, newParam.get(key)))); + } + } + List>> newlyParams = newParam.entrySet().stream() + .filter(entry -> !oldParam.containsKey(entry.getKey())) + .map(entry -> Pair.of(id, Pair.of(entry.getKey(), entry.getValue()))) + .collect(Collectors.toList()); + paramsToAdd.addAll(newlyParams); + } + + deleteParams(paramTable, idColumn, paramsToDelete); + updateParams(paramTable, idColumn, paramsToUpdate); + insertParams(paramTable, idColumn, paramsToAdd); + } + + private Map> getParams(String paramTable, String idName, + List ids) throws MetaException { 
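// Editor's note: illustrative sketch only, not part of this patch. It isolates the
// three-way diff that updateParamTableInBatch() above computes before touching the
// database: keys present only in the old map are deleted, keys whose value changed
// are updated, and keys present only in the new map are inserted. The ParamDiff
// holder type is an assumption introduced for this sketch.
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class ParamDiffSketch {
  static final class ParamDiff {
    final Map<String, String> toInsert = new HashMap<>();
    final Map<String, String> toUpdate = new HashMap<>();
    final Set<String> toDelete = new HashSet<>();
  }

  static ParamDiff diff(Map<String, String> oldParams, Map<String, String> newParams) {
    ParamDiff d = new ParamDiff();
    for (Map.Entry<String, String> e : oldParams.entrySet()) {
      if (!newParams.containsKey(e.getKey())) {
        d.toDelete.add(e.getKey());                            // parameter was dropped
      } else if (!e.getValue().equals(newParams.get(e.getKey()))) {
        d.toUpdate.put(e.getKey(), newParams.get(e.getKey())); // value changed
      }
    }
    for (Map.Entry<String, String> e : newParams.entrySet()) {
      if (!oldParams.containsKey(e.getKey())) {
        d.toInsert.put(e.getKey(), e.getValue());              // brand-new parameter
      }
    }
    return d;
  }
}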
+ Map> idToParams = new HashMap<>(); + Batchable.runBatched(maxBatchSize, ids, new Batchable() { + @Override + public List run(List input) throws MetaException { + String idLists = MetaStoreDirectSql.getIdListForIn(input); + String queryText = "select " + idName + ", \"PARAM_KEY\", \"PARAM_VALUE\" from " + + paramTable + " where " + idName + " in (" + idLists + ")"; + try (QueryWrapper query = new QueryWrapper(pm.newQuery("javax.jdo.query.SQL", queryText))) { + List sqlResult = executeWithArray(query.getInnerQuery(), null, queryText); + for (Object[] row : sqlResult) { + Long id = extractSqlLong(row[0]); + String paramKey = (String) row[1]; + String paramVal = (String) row[2]; + idToParams.computeIfAbsent(id, key -> new HashMap<>()).put(paramKey, paramVal); + } + } + return null; + } + }); + return idToParams; + } + + private void deleteParams(String paramTable, String idColumn, + List> deleteIdKeys) throws MetaException { + String deleteStmt = "delete from " + paramTable + " where " + idColumn + "=? and PARAM_KEY=?"; + int maxRows = dbType.getMaxRows(maxBatchSize, 2); + updateWithStatement(statement -> Batchable.runBatched(maxRows, deleteIdKeys, + new Batchable, Void>() { + @Override + public List run(List> input) throws SQLException { + for (Pair pair : input) { + statement.setLong(1, pair.getLeft()); + statement.setString(2, pair.getRight()); + statement.addBatch(); + } + statement.executeBatch(); + return null; + } + } + ), deleteStmt); + } + + private void updateParams(String paramTable, String idColumn, + List>> updateIdAndParams) throws MetaException { + List columns = Arrays.asList("\"PARAM_VALUE\""); + List conditionKeys = Arrays.asList(idColumn, "\"PARAM_KEY\""); + String stmt = TxnUtils.createUpdatePreparedStmt(paramTable, columns, conditionKeys); + int maxRows = dbType.getMaxRows(maxBatchSize, 3); + updateWithStatement(statement -> Batchable.runBatched(maxRows, updateIdAndParams, + new Batchable>, Object>() { + @Override + public List run(List>> input) throws SQLException { + for (Pair> pair : input) { + statement.setString(1, pair.getRight().getRight()); + statement.setLong(2, pair.getLeft()); + statement.setString(3, pair.getRight().getLeft()); + statement.addBatch(); + } + statement.executeBatch(); + return null; + } + } + ), stmt); + } + + private void insertParams(String paramTable, String idColumn, + List>> addIdAndParams) throws MetaException { + List columns = Arrays.asList(idColumn, "\"PARAM_KEY\"", "\"PARAM_VALUE\""); + String query = TxnUtils.createInsertPreparedStmt(paramTable, columns); + int maxRows = dbType.getMaxRows(maxBatchSize, 3); + updateWithStatement(statement -> Batchable.runBatched(maxRows, addIdAndParams, + new Batchable>, Void>() { + @Override + public List run(List>> input) throws SQLException { + for (Pair> pair : input) { + statement.setLong(1, pair.getLeft()); + statement.setString(2, pair.getRight().getLeft()); + statement.setString(3, pair.getRight().getRight()); + statement.addBatch(); + } + statement.executeBatch(); + return null; + } + } + ), query); + } + + private void updateStorageDescriptorInBatch(Map idToSd) + throws MetaException { + Map sdIdToCdId = new HashMap<>(); + Map sdIdToSerdeId = new HashMap<>(); + List cdIds = new ArrayList<>(); + List validSdIds = filterIdsByNonNullValue(new ArrayList<>(idToSd.keySet()), idToSd); + Batchable.runBatched(maxBatchSize, validSdIds, new Batchable() { + @Override + public List run(List input) throws Exception { + String idLists = MetaStoreDirectSql.getIdListForIn(input); + String queryText = "select 
\"SD_ID\", \"CD_ID\", \"SERDE_ID\" from \"SDS\" " + + "where \"SD_ID\" in (" + idLists + ")"; + try (QueryWrapper query = new QueryWrapper(pm.newQuery("javax.jdo.query.SQL", queryText))) { + List sqlResult = executeWithArray(query.getInnerQuery(), null, queryText); + for (Object[] row : sqlResult) { + Long sdId = extractSqlLong(row[0]); + Long cdId = extractSqlLong(row[1]); + Long serdeId = extractSqlLong(row[2]); + sdIdToCdId.put(sdId, cdId); + sdIdToSerdeId.put(sdId, serdeId); + cdIds.add(cdId); + } + } + return null; + } + }); + + Map>> sdParamsOpt = new HashMap<>(); + Map> idToBucketCols = new HashMap<>(); + Map> idToSortCols = new HashMap<>(); + Map idToSkewedInfo = new HashMap<>(); + Map> sdIdToNewColumns = new HashMap<>(); + List serdeIds = new ArrayList<>(); + Map serdeIdToSerde = new HashMap<>(); + Map>> serdeParamsOpt = new HashMap<>(); + for (Long sdId : validSdIds) { + StorageDescriptor sd = idToSd.get(sdId); + sdParamsOpt.put(sdId, Optional.ofNullable(sd.getParameters())); + idToBucketCols.put(sdId, sd.getBucketCols()); + idToSortCols.put(sdId, sd.getSortCols()); + idToSkewedInfo.put(sdId, sd.getSkewedInfo()); + sdIdToNewColumns.put(sdId, sd.getCols()); + + Long serdeId = sdIdToSerdeId.get(sdId); + serdeIds.add(serdeId); + serdeIdToSerde.put(serdeId, sd.getSerdeInfo()); + serdeParamsOpt.put(serdeId, Optional.ofNullable(sd.getSerdeInfo().getParameters())); + } + + updateParamTableInBatch("\"SD_PARAMS\"", "\"SD_ID\"", validSdIds, sdParamsOpt); + updateBucketColsInBatch(idToBucketCols, validSdIds); + updateSortColsInBatch(idToSortCols, validSdIds); + updateSkewedInfoInBatch(idToSkewedInfo, validSdIds); + Map sdIdToNewCdId = updateCDInBatch(cdIds, validSdIds, sdIdToCdId, sdIdToNewColumns); + updateSerdeInBatch(serdeIds, serdeIdToSerde); + updateParamTableInBatch("\"SERDE_PARAMS\"", "\"SERDE_ID\"", serdeIds, serdeParamsOpt); + + List cdIdsMayDelete = sdIdToCdId.entrySet().stream() + .filter(entry -> sdIdToNewCdId.containsKey(entry.getKey())) + .map(entry -> entry.getValue()) + .collect(Collectors.toList()); + + // Update SDS table after CDS to get the freshest CD_ID values. + sdIdToCdId.replaceAll((sdId, cdId) -> + sdIdToNewCdId.containsKey(sdId) ? 
sdIdToNewCdId.get(sdId) : cdId); + updateSDInBatch(validSdIds, idToSd, sdIdToCdId); + + List usedIds = Batchable.runBatched(maxBatchSize, cdIdsMayDelete, + new Batchable() { + @Override + public List run(List input) throws Exception { + String idLists = MetaStoreDirectSql.getIdListForIn(input); + String queryText = "select \"CD_ID\" from \"SDS\" where \"CD_ID\" in ( " + idLists + ")"; + try (QueryWrapper query = new QueryWrapper(pm.newQuery("javax.jdo.query.SQL", queryText))) { + List sqlResult = executeWithArray(query.getInnerQuery(), null, queryText); + return new ArrayList<>(sqlResult); + } + } + }); + List unusedCdIds = cdIdsMayDelete.stream().filter(id -> !usedIds.contains(id)).collect(Collectors.toList()); + + deleteCDInBatch(unusedCdIds); + } + + private void updateSDInBatch(List ids, Map idToSd, + Map idToCdId) throws MetaException { + List columns = Arrays.asList("\"CD_ID\"", "\"INPUT_FORMAT\"", "\"IS_COMPRESSED\"", + "\"IS_STOREDASSUBDIRECTORIES\"", "\"LOCATION\"", "\"NUM_BUCKETS\"", "\"OUTPUT_FORMAT\""); + List conditionKeys = Arrays.asList("\"SD_ID\""); + String stmt = TxnUtils.createUpdatePreparedStmt("\"SDS\"", columns, conditionKeys); + int maxRows = dbType.getMaxRows(maxBatchSize, 8); + updateWithStatement(statement -> Batchable.runBatched(maxRows, ids, + new Batchable() { + @Override + public List run(List input) throws SQLException { + for (Long sdId : input) { + StorageDescriptor sd = idToSd.get(sdId); + statement.setLong(1, idToCdId.get(sdId)); + statement.setString(2, sd.getInputFormat()); + statement.setBoolean(3, sd.isCompressed()); + statement.setBoolean(4, sd.isStoredAsSubDirectories()); + statement.setString(5, sd.getLocation()); + statement.setInt(6, sd.getNumBuckets()); + statement.setString(7, sd.getOutputFormat()); + statement.setLong(8, sdId); + statement.addBatch(); + } + statement.executeBatch(); + return null; + } + } + ), stmt); + } + + private void updateBucketColsInBatch(Map> sdIdToBucketCols, + List sdIds) throws MetaException { + Batchable.runBatched(maxBatchSize, sdIds, new Batchable() { + @Override + public List run(List input) throws MetaException { + String idLists = MetaStoreDirectSql.getIdListForIn(input); + String queryText = "delete from \"BUCKETING_COLS\" where \"SD_ID\" in (" + idLists + ")"; + updateWithStatement(PreparedStatement::executeUpdate, queryText); + return null; + } + }); + List columns = Arrays.asList("\"SD_ID\"", "\"INTEGER_IDX\"", "\"BUCKET_COL_NAME\""); + String stmt = TxnUtils.createInsertPreparedStmt("\"BUCKETING_COLS\"", columns); + List idWithBucketCols = filterIdsByNonNullValue(sdIds, sdIdToBucketCols); + int maxRows = dbType.getMaxRows(maxBatchSize, 3); + updateWithStatement(statement -> Batchable.runBatched(maxRows, idWithBucketCols, new Batchable() { + @Override + public List run(List input) throws SQLException { + for (Long id : input) { + List bucketCols = sdIdToBucketCols.get(id); + for (int i = 0; i < bucketCols.size(); i++) { + statement.setLong(1, id); + statement.setInt(2, i); + statement.setString(3, bucketCols.get(i)); + statement.addBatch(); + } + } + statement.executeBatch(); + return null; + } + }), stmt); + } + + private void updateSortColsInBatch(Map> sdIdToSortCols, + List sdIds) throws MetaException { + Batchable.runBatched(maxBatchSize, sdIds, new Batchable() { + @Override + public List run(List input) throws MetaException { + String idLists = MetaStoreDirectSql.getIdListForIn(input); + String queryText = "delete from \"SORT_COLS\" where \"SD_ID\" in (" + idLists + ")"; + 
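// Editor's note: illustrative sketch only, not part of this patch. It shows, for a
// single storage descriptor, the delete-then-reinsert pattern the surrounding code
// applies in bulk to positional child tables such as "BUCKETING_COLS" and
// "SORT_COLS": existing rows are removed, then the new values are batch-inserted
// with their list position stored in "INTEGER_IDX" so column order survives a read.
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

class ReplaceOrderedChildRowsSketch {
  static void replaceBucketCols(Connection conn, long sdId, List<String> bucketCols) throws SQLException {
    try (PreparedStatement delete =
             conn.prepareStatement("delete from \"BUCKETING_COLS\" where \"SD_ID\" = ?")) {
      delete.setLong(1, sdId);
      delete.executeUpdate();
    }
    String insert = "insert into \"BUCKETING_COLS\" (\"SD_ID\", \"INTEGER_IDX\", \"BUCKET_COL_NAME\")"
        + " values (?, ?, ?)";
    try (PreparedStatement ps = conn.prepareStatement(insert)) {
      for (int i = 0; i < bucketCols.size(); i++) {
        ps.setLong(1, sdId);
        ps.setInt(2, i);                    // list position preserves declared column order
        ps.setString(3, bucketCols.get(i));
        ps.addBatch();
      }
      ps.executeBatch();
    }
  }
}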
updateWithStatement(PreparedStatement::executeUpdate, queryText); + return null; + } + }); + + List columns = Arrays.asList("\"SD_ID\"", "\"INTEGER_IDX\"", "\"COLUMN_NAME\"", "\"ORDER\""); + String stmt = TxnUtils.createInsertPreparedStmt("\"SORT_COLS\"", columns); + List idWithSortCols = filterIdsByNonNullValue(sdIds, sdIdToSortCols); + int maxRows = dbType.getMaxRows(maxBatchSize, 4); + updateWithStatement(statement -> Batchable.runBatched(maxRows, idWithSortCols, new Batchable() { + @Override + public List run(List input) throws SQLException { + for (Long id : input) { + List bucketCols = sdIdToSortCols.get(id); + for (int i = 0; i < bucketCols.size(); i++) { + statement.setLong(1, id); + statement.setInt(2, i); + statement.setString(3, bucketCols.get(i).getCol()); + statement.setInt(4, bucketCols.get(i).getOrder()); + statement.addBatch(); + } + } + statement.executeBatch(); + return null; + } + }), stmt); + } + + private void updateSkewedInfoInBatch(Map sdIdToSkewedInfo, + List sdIds) throws MetaException { + // Delete all mapping old stringLists and skewedValues, + // skewedValues first for the foreign key constraint. + List stringListId = getStringListId(sdIds); + if (!stringListId.isEmpty()) { + Batchable.runBatched(maxBatchSize, sdIds, new Batchable() { + @Override + public List run(List input) throws Exception { + String idLists = MetaStoreDirectSql.getIdListForIn(input); + String deleteSkewValuesQuery = + "delete from \"SKEWED_VALUES\" where \"SD_ID_OID\" in (" + idLists + ")"; + updateWithStatement(PreparedStatement::executeUpdate, deleteSkewValuesQuery); + String deleteSkewColValueLocMapQuery = + "delete from \"SKEWED_COL_VALUE_LOC_MAP\" where \"SD_ID\" in (" + idLists + ")"; + updateWithStatement(PreparedStatement::executeUpdate, deleteSkewColValueLocMapQuery); + String deleteSkewColNamesQuery = + "delete from \"SKEWED_COL_NAMES\" where \"SD_ID\" in (" + idLists + ")"; + updateWithStatement(PreparedStatement::executeUpdate, deleteSkewColNamesQuery); + return null; + } + }); + Batchable.runBatched(maxBatchSize, stringListId, new Batchable() { + @Override + public List run(List input) throws MetaException { + String idLists = MetaStoreDirectSql.getIdListForIn(input); + String deleteStringListValuesQuery = + "delete from \"SKEWED_STRING_LIST_VALUES\" where \"STRING_LIST_ID\" in (" + idLists + ")"; + updateWithStatement(PreparedStatement::executeUpdate, deleteStringListValuesQuery); + String deleteStringListQuery = + "delete from \"SKEWED_STRING_LIST\" where \"STRING_LIST_ID\" in (" + idLists + ")"; + updateWithStatement(PreparedStatement::executeUpdate, deleteStringListQuery); + return null; + } + }); + } + + // Generate new stringListId for each SdId + Map> idToSkewedColNames = new HashMap<>(); // used for SKEWED_COL_NAMES + List newStringListId = new ArrayList<>(); // used for SKEWED_STRING_LIST + Map> stringListIdToValues = new HashMap<>(); // used for SKEWED_STRING_LIST_VALUES + Map> sdIdToNewStringListId = new HashMap<>(); // used for SKEWED_VALUES + Map>> sdIdToValueLoc = new HashMap<>(); // used for SKEWED_COL_VALUE_LOC_MAP + + List idWithSkewedInfo = filterIdsByNonNullValue(sdIds, sdIdToSkewedInfo); + for (Long sdId : idWithSkewedInfo) { + SkewedInfo skewedInfo = sdIdToSkewedInfo.get(sdId); + idToSkewedColNames.put(sdId, skewedInfo.getSkewedColNames()); + List> skewedColValues = skewedInfo.getSkewedColValues(); + if (skewedColValues != null) { + for (List colValues : skewedColValues) { + Long nextStringListId = getDataStoreId(MStringList.class); + 
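// Editor's note: illustrative sketch only, not part of this patch. It shows the shape
// of the skewed-info rebuild happening around this point: each skewed value list gets
// a freshly allocated surrogate id, the id is recorded in "SKEWED_STRING_LIST", and
// the individual values go to "SKEWED_STRING_LIST_VALUES" keyed by that id and their
// position. Id allocation is abstracted behind a LongSupplier here; the patch itself
// obtains ids from DataNucleus via getDataStoreId(MStringList.class).
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;
import java.util.function.LongSupplier;

class SkewedStringListSketch {
  static long insertStringList(Connection conn, LongSupplier idAllocator, List<String> values)
      throws SQLException {
    long stringListId = idAllocator.getAsLong();
    try (PreparedStatement head = conn.prepareStatement(
        "insert into \"SKEWED_STRING_LIST\" (\"STRING_LIST_ID\") values (?)")) {
      head.setLong(1, stringListId);
      head.executeUpdate();
    }
    try (PreparedStatement rows = conn.prepareStatement(
        "insert into \"SKEWED_STRING_LIST_VALUES\""
            + " (\"STRING_LIST_ID\", \"INTEGER_IDX\", \"STRING_LIST_VALUE\") values (?, ?, ?)")) {
      for (int i = 0; i < values.size(); i++) {
        rows.setLong(1, stringListId);
        rows.setInt(2, i);                 // keep the value order
        rows.setString(3, values.get(i));
        rows.addBatch();
      }
      rows.executeBatch();
    }
    return stringListId;                   // caller links it from SKEWED_VALUES or the loc map
  }
}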
newStringListId.add(nextStringListId); + sdIdToNewStringListId.computeIfAbsent(sdId, k -> new ArrayList<>()).add(nextStringListId); + stringListIdToValues.put(nextStringListId, colValues); + } + } + Map, String> skewedColValueLocationMaps = skewedInfo.getSkewedColValueLocationMaps(); + if (skewedColValueLocationMaps != null) { + for (Map.Entry, String> entry : skewedColValueLocationMaps.entrySet()) { + List colValues = entry.getKey(); + String location = entry.getValue(); + Long nextStringListId = getDataStoreId(MStringList.class); + newStringListId.add(nextStringListId); + stringListIdToValues.put(nextStringListId, colValues); + sdIdToValueLoc.computeIfAbsent(sdId, k -> new ArrayList<>()).add(Pair.of(nextStringListId, location)); + } + } + } + + insertSkewedColNamesInBatch(idToSkewedColNames, sdIds); + insertStringListInBatch(newStringListId); + insertStringListValuesInBatch(stringListIdToValues, newStringListId); + insertSkewedValuesInBatch(sdIdToNewStringListId, sdIds); + insertSkewColValueLocInBatch(sdIdToValueLoc, sdIds); + } + + private Long getDataStoreId(Class modelClass) throws MetaException { + ExecutionContext ec = ((JDOPersistenceManager) pm).getExecutionContext(); + AbstractClassMetaData cmd = ec.getMetaDataManager().getMetaDataForClass(modelClass, ec.getClassLoaderResolver()); + if (cmd.getIdentityType() == IdentityType.DATASTORE) { + return (Long) ec.getStoreManager().getValueGenerationStrategyValue(ec, cmd, -1); + } else { + throw new MetaException("Identity type is not datastore."); + } + } + + private void insertSkewedColNamesInBatch(Map> sdIdToSkewedColNames, + List sdIds) throws MetaException { + List columns = Arrays.asList("\"SD_ID\"", "\"INTEGER_IDX\"", "\"SKEWED_COL_NAME\""); + String stmt = TxnUtils.createInsertPreparedStmt("\"SKEWED_COL_NAMES\"", columns); + List idWithSkewedCols = filterIdsByNonNullValue(sdIds, sdIdToSkewedColNames); + int maxRows = dbType.getMaxRows(maxBatchSize, 3); + updateWithStatement(statement -> Batchable.runBatched(maxRows, idWithSkewedCols, new Batchable() { + @Override + public List run(List input) throws SQLException { + for (Long id : input) { + List skewedColNames = sdIdToSkewedColNames.get(id); + for (int i = 0; i < skewedColNames.size(); i++) { + statement.setLong(1, id); + statement.setInt(2, i); + statement.setString(3, skewedColNames.get(i)); + statement.addBatch(); + } + } + statement.executeBatch(); + return null; + } + }), stmt); + } + + private void insertStringListInBatch(List stringListIds) throws MetaException { + List columns = Arrays.asList("\"STRING_LIST_ID\""); + String insertQuery = TxnUtils.createInsertPreparedStmt("\"SKEWED_STRING_LIST\"", columns); + int maxRows = dbType.getMaxRows(maxBatchSize, 1); + updateWithStatement(statement -> Batchable.runBatched(maxRows, stringListIds, + new Batchable() { + @Override + public List run(List input) throws SQLException { + for (Long id : input) { + statement.setLong(1, id); + statement.addBatch(); + } + statement.executeBatch(); + return null; + } + } + ), insertQuery); + } + + private void insertStringListValuesInBatch(Map> stringListIdToValues, + List stringListIds) throws MetaException { + List columns = Arrays.asList("\"STRING_LIST_ID\"", "\"INTEGER_IDX\"", "\"STRING_LIST_VALUE\""); + String insertQuery = TxnUtils.createInsertPreparedStmt("\"SKEWED_STRING_LIST_VALUES\"", columns); + List idWithStringList = filterIdsByNonNullValue(stringListIds, stringListIdToValues); + int maxRows = dbType.getMaxRows(maxBatchSize, 3); + updateWithStatement(statement -> 
Batchable.runBatched(maxRows, idWithStringList, + new Batchable() { + @Override + public List run(List input) throws SQLException { + for (Long stringListId : input) { + List values = stringListIdToValues.get(stringListId); + for (int i = 0; i < values.size(); i++) { + statement.setLong(1, stringListId); + statement.setInt(2, i); + statement.setString(3, values.get(i)); + statement.addBatch(); + } + } + statement.executeBatch(); + return null; + } + } + ), insertQuery); + } + + private void insertSkewedValuesInBatch(Map> sdIdToStringListId, + List sdIds) throws MetaException { + List columns = Arrays.asList("\"SD_ID_OID\"", "\"INTEGER_IDX\"", "\"STRING_LIST_ID_EID\""); + String insertQuery = TxnUtils.createInsertPreparedStmt("\"SKEWED_VALUES\"", columns); + List idWithSkewedValues = filterIdsByNonNullValue(sdIds, sdIdToStringListId); + int maxRows = dbType.getMaxRows(maxBatchSize, 3); + updateWithStatement(statement -> Batchable.runBatched(maxRows, idWithSkewedValues, + new Batchable() { + @Override + public List run(List input) throws Exception { + for (Long sdId : input) { + List stringListIds = sdIdToStringListId.get(sdId); + for (int i = 0; i < stringListIds.size(); i++) { + statement.setLong(1, sdId); + statement.setInt(2, i); + statement.setLong(3, stringListIds.get(i)); + statement.addBatch(); + } + } + statement.executeBatch(); + return null; + } + } + ), insertQuery); + } + + private void insertSkewColValueLocInBatch(Map>> sdIdToColValueLoc, + List sdIds) throws MetaException { + List columns = Arrays.asList("\"SD_ID\"", "\"STRING_LIST_ID_KID\"", "\"LOCATION\""); + String insertQuery = TxnUtils.createInsertPreparedStmt("\"SKEWED_COL_VALUE_LOC_MAP\"", columns); + List idWithColValueLoc = filterIdsByNonNullValue(sdIds, sdIdToColValueLoc); + int maxRows = dbType.getMaxRows(maxBatchSize, 3); + updateWithStatement(statement -> Batchable.runBatched(maxRows, idWithColValueLoc, + new Batchable() { + @Override + public List run(List input) throws Exception { + for (Long sdId : input) { + List> stringListIdAndLoc = sdIdToColValueLoc.get(sdId); + for (Pair pair : stringListIdAndLoc) { + statement.setLong(1, sdId); + statement.setLong(2, pair.getLeft()); + statement.setString(3, pair.getRight()); + statement.addBatch(); + } + } + statement.executeBatch(); + return null; + } + } + ), insertQuery); + } + + private Map updateCDInBatch(List cdIds, List sdIds, Map sdIdToCdId, + Map> sdIdToNewColumns) throws MetaException { + Map>> cdIdToColIdxPair = new HashMap<>(); + Batchable.runBatched(maxBatchSize, cdIds, new Batchable() { + @Override + public List run(List input) throws Exception { + String idLists = MetaStoreDirectSql.getIdListForIn(input); + String queryText = "select \"CD_ID\", \"COMMENT\", \"COLUMN_NAME\", \"TYPE_NAME\", " + + "\"INTEGER_IDX\" from \"COLUMNS_V2\" where \"CD_ID\" in (" + idLists + ")"; + try (QueryWrapper query = new QueryWrapper(pm.newQuery("javax.jdo.query.SQL", queryText))) { + List sqlResult = executeWithArray(query.getInnerQuery(), null, queryText); + for (Object[] row : sqlResult) { + Long id = extractSqlLong(row[0]); + String comment = (String) row[1]; + String name = (String) row[2]; + String type = (String) row[3]; + int index = extractSqlInt(row[4]); + FieldSchema field = new FieldSchema(name, type, comment); + cdIdToColIdxPair.computeIfAbsent(id, k -> new ArrayList<>()).add(Pair.of(index, field)); + } + } + return null; + } + }); + List newCdIds = new ArrayList<>(); + Map> newCdIdToCols = new HashMap<>(); + Map oldCdIdToNewCdId = new HashMap<>(); + Map 
sdIdToNewCdId = new HashMap<>(); + // oldCdId -> [(oldIdx, newIdx)], used to update KEY_CONSTRAINTS + Map>> oldCdIdToColIdxPairs = new HashMap<>(); + for (Long sdId : sdIds) { + Long cdId = sdIdToCdId.get(sdId); + List> cols = cdIdToColIdxPair.get(cdId); + // Placeholder to avoid IndexOutOfBoundsException. + List oldCols = new ArrayList<>(Collections.nCopies(cols.size(), null)); + cols.forEach(pair -> oldCols.set(pair.getLeft(), pair.getRight())); + + List newCols = sdIdToNewColumns.get(sdId); + // Use the new column descriptor only if the old column descriptor differs from the new one. + if (oldCols == null || !oldCols.equals(newCols)) { + if (oldCols != null && newCols != null) { + Long newCdId = getDataStoreId(MColumnDescriptor.class); + newCdIds.add(newCdId); + newCdIdToCols.put(newCdId, newCols); + oldCdIdToNewCdId.put(cdId, newCdId); + sdIdToNewCdId.put(sdId, newCdId); + for (int i = 0; i < oldCols.size(); i++) { + FieldSchema oldCol = oldCols.get(i); + int newIdx = newCols.indexOf(oldCol); + if (newIdx != -1) { + oldCdIdToColIdxPairs.computeIfAbsent(cdId, k -> new ArrayList<>()).add(Pair.of(i, newIdx)); + } + } + } + } + } + + insertCDInBatch(newCdIds, newCdIdToCols); + // TODO: followed the jdo implement now, but it should be an error in such case: + // partitions use the default table cd, when changing partition cd with + // constraint key mapping, the constraints will be update unexpected. + updateKeyConstraintsInBatch(oldCdIdToNewCdId, oldCdIdToColIdxPairs); + + return sdIdToNewCdId; + } + + private void insertCDInBatch(List ids, Map> idToCols) + throws MetaException { + String insertCds = TxnUtils.createInsertPreparedStmt("\"CDS\"", Arrays.asList("\"CD_ID\"")); + int maxRows = dbType.getMaxRows(maxBatchSize, 1); + updateWithStatement(statement -> Batchable.runBatched(maxRows, ids, + new Batchable() { + @Override + public List run(List input) throws SQLException { + for (Long id : input) { + statement.setLong(1, id); + statement.addBatch(); + } + statement.executeBatch(); + return null; + } + }), insertCds); + + List columns = Arrays.asList("\"CD_ID\"", + "\"COMMENT\"", "\"COLUMN_NAME\"", "\"TYPE_NAME\"", "\"INTEGER_IDX\""); + String insertColumns = TxnUtils.createInsertPreparedStmt("\"COLUMNS_V2\"", columns); + int maxRowsForCDs = dbType.getMaxRows(maxBatchSize, 5); + updateWithStatement(statement -> Batchable.runBatched(maxRowsForCDs, ids, + new Batchable() { + @Override + public List run(List input) throws Exception { + for (Long id : input) { + List cols = idToCols.get(id); + for (int i = 0; i < cols.size(); i++) { + FieldSchema col = cols.get(i); + statement.setLong(1, id); + statement.setString(2, col.getComment()); + statement.setString(3, col.getName()); + statement.setString(4, col.getType()); + statement.setInt(5, i); + statement.addBatch(); + } + } + statement.executeBatch(); + return null; + } + }), insertColumns); + } + + private void updateKeyConstraintsInBatch(Map oldCdIdToNewCdId, + Map>> oldCdIdToColIdxPairs) throws MetaException { + List oldCdIds = new ArrayList<>(oldCdIdToNewCdId.keySet()); + String tableName = "\"KEY_CONSTRAINTS\""; + List parentColumns = Arrays.asList("\"PARENT_CD_ID\"", "\"PARENT_INTEGER_IDX\""); + List childColumns = Arrays.asList("\"CHILD_CD_ID\"", "\"CHILD_INTEGER_IDX\""); + + String updateParent = TxnUtils.createUpdatePreparedStmt(tableName, parentColumns, parentColumns); + String updateChild = TxnUtils.createUpdatePreparedStmt(tableName, childColumns, childColumns); + for (String updateStmt : new String[]{updateParent, updateChild}) { 
+ int maxRows = dbType.getMaxRows(maxBatchSize, 4); + updateWithStatement(statement -> Batchable.runBatched(maxRows, oldCdIds, + new Batchable() { + @Override + public List run(List input) throws SQLException { + for (Long oldId : input) { + // Followed the jdo implement to update only mapping columns for KEY_CONSTRAINTS. + if (!oldCdIdToColIdxPairs.containsKey(oldId)) { + continue; + } + Long newId = oldCdIdToNewCdId.get(oldId); + for (Pair idx : oldCdIdToColIdxPairs.get(oldId)) { + statement.setLong(1, newId); + statement.setInt(2, idx.getRight()); + statement.setLong(3, oldId); + statement.setInt(4, idx.getLeft()); + statement.addBatch(); + } + } + statement.executeBatch(); + return null; + } + }), updateStmt); + } + } + + private void deleteCDInBatch(List cdIds) throws MetaException { + Batchable.runBatched(maxBatchSize, cdIds, new Batchable() { + @Override + public List run(List input) throws Exception { + String idLists = MetaStoreDirectSql.getIdListForIn(input); + // First remove any constraints that may be associated with these CDs + String deleteConstraintsByCd = "delete from \"KEY_CONSTRAINTS\" where \"CHILD_CD_ID\" in (" + + idLists + ") or \"PARENT_CD_ID\" in (" + idLists + ")"; + updateWithStatement(PreparedStatement::executeUpdate, deleteConstraintsByCd); + + // Then delete COLUMNS_V2 before CDS for foreign constraints. + String deleteColumns = "delete from \"COLUMNS_V2\" where \"CD_ID\" in (" + idLists + ")"; + updateWithStatement(PreparedStatement::executeUpdate, deleteColumns); + + // Finally delete CDS + String deleteCDs = "delete from \"CDS\" where \"CD_ID\" in (" + idLists + ")"; + updateWithStatement(PreparedStatement::executeUpdate, deleteCDs); + return null; + } + }); + } + + private void updateSerdeInBatch(List ids, Map idToSerde) + throws MetaException { + // Followed the jdo implement to update only NAME and SLIB of SERDES. 
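// Editor's note: illustrative sketch only, not part of this patch. It spells out the
// batched UPDATE that the surrounding updateSerdeInBatch() performs: only "NAME" and
// "SLIB" are rewritten per "SERDE_ID", mirroring what the JDO path updates, and all
// statements are flushed as one JDBC batch. The patch builds the SQL text through
// TxnUtils.createUpdatePreparedStmt; the SerdeRow holder type is an assumption of
// this sketch.
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

class SerdeBatchUpdateSketch {
  static final class SerdeRow {
    final long serdeId;
    final String name;
    final String serializationLib;
    SerdeRow(long serdeId, String name, String serializationLib) {
      this.serdeId = serdeId;
      this.name = name;
      this.serializationLib = serializationLib;
    }
  }

  static void updateSerdes(Connection conn, List<SerdeRow> serdes) throws SQLException {
    String sql = "update \"SERDES\" set \"NAME\" = ?, \"SLIB\" = ? where \"SERDE_ID\" = ?";
    try (PreparedStatement ps = conn.prepareStatement(sql)) {
      for (SerdeRow row : serdes) {
        ps.setString(1, row.name);
        ps.setString(2, row.serializationLib);
        ps.setLong(3, row.serdeId);
        ps.addBatch();
      }
      ps.executeBatch();                  // one round trip for the whole batch
    }
  }
}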
+ List columns = Arrays.asList("\"NAME\"", "\"SLIB\""); + List condKeys = Arrays.asList("\"SERDE_ID\""); + String updateStmt = TxnUtils.createUpdatePreparedStmt("\"SERDES\"", columns, condKeys); + List idWithSerde = filterIdsByNonNullValue(ids, idToSerde); + int maxRows = dbType.getMaxRows(maxBatchSize, 3); + updateWithStatement(statement -> Batchable.runBatched(maxRows, idWithSerde, + new Batchable() { + @Override + public List run(List input) throws SQLException { + for (Long id : input) { + SerDeInfo serde = idToSerde.get(id); + statement.setString(1, serde.getName()); + statement.setString(2, serde.getSerializationLib()); + statement.setLong(3, id); + statement.addBatch(); + } + statement.executeBatch(); + return null; + } + }), updateStmt); + } + + private static final class PartitionInfo { + long partitionId; + long writeId; + String partitionName; + public PartitionInfo(long partitionId, long writeId, String partitionName) { + this.partitionId = partitionId; + this.writeId = writeId; + this.partitionName = partitionName; + } + + @Override + public int hashCode() { + return (int)partitionId; + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null) { + return false; + } + if (!(o instanceof PartitionInfo)) { + return false; + } + PartitionInfo other = (PartitionInfo)o; + if (this.partitionId != other.partitionId) { + return false; + } + return true; + } + } + + private static final class PartColNameInfo { + long partitionId; + String colName; + String engine; + public PartColNameInfo(long partitionId, String colName, String engine) { + this.partitionId = partitionId; + this.colName = colName; + this.engine = engine; + } + + @Override + public int hashCode() { + return (int)partitionId; + } + + @Override + public boolean equals(Object o) + { + if (this == o) { + return true; + } + if (o == null) { + return false; + } + if (!(o instanceof PartColNameInfo)) { + return false; + } + PartColNameInfo other = (PartColNameInfo)o; + if (this.partitionId != other.partitionId) { + return false; + } + if (!this.colName.equalsIgnoreCase(other.colName)) { + return false; + } + return Objects.equals(this.engine, other.engine); + } + } +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DirectSqlUpdateStat.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DirectSqlUpdateStat.java deleted file mode 100644 index df1f77f20649..000000000000 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/DirectSqlUpdateStat.java +++ /dev/null @@ -1,727 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.metastore; - -import org.apache.hadoop.hive.common.StatsSetupConst; -import org.apache.hadoop.hive.metastore.api.ColumnStatistics; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; -import org.apache.hadoop.hive.metastore.events.UpdatePartitionColumnStatEvent; -import org.apache.hadoop.hive.metastore.events.UpdatePartitionColumnStatEventBatch; -import org.apache.hadoop.hive.metastore.messaging.EventMessage; -import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics; -import org.apache.hadoop.hive.metastore.tools.SQLGenerator; -import org.apache.hadoop.hive.metastore.txn.TxnUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; - -import javax.jdo.PersistenceManager; -import javax.jdo.datastore.JDOConnection; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.locks.ReentrantLock; -import java.util.stream.Collectors; - -import static org.apache.hadoop.hive.common.StatsSetupConst.COLUMN_STATS_ACCURATE; -import static org.apache.hadoop.hive.metastore.HMSHandler.getPartValsFromName; -import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; - -/** - * This class contains the optimizations for MetaStore that rely on direct SQL access to - * the underlying database. It should use ANSI SQL and be compatible with common databases - * such as MySQL (note that MySQL doesn't use full ANSI mode by default), Postgres, etc. - * - * This class separates out the statistics update part from MetaStoreDirectSql class. - */ -class DirectSqlUpdateStat { - private static final Logger LOG = LoggerFactory.getLogger(DirectSqlUpdateStat.class.getName()); - PersistenceManager pm; - Configuration conf; - DatabaseProduct dbType; - int maxBatchSize; - SQLGenerator sqlGenerator; - private static final ReentrantLock derbyLock = new ReentrantLock(true); - - public DirectSqlUpdateStat(PersistenceManager pm, Configuration conf, - DatabaseProduct dbType, int batchSize) { - this.pm = pm; - this.conf = conf; - this.dbType = dbType; - this.maxBatchSize = batchSize; - sqlGenerator = new SQLGenerator(dbType, conf); - } - - /** - * {@link #lockInternal()} and {@link #unlockInternal()} are used to serialize those operations that require - * Select ... For Update to sequence operations properly. In practice that means when running - * with Derby database. See more notes at class level. 
- */ - private void lockInternal() { - if(dbType.isDERBY()) { - derbyLock.lock(); - } - } - - private void unlockInternal() { - if(dbType.isDERBY()) { - derbyLock.unlock(); - } - } - - void rollbackDBConn(Connection dbConn) { - try { - if (dbConn != null && !dbConn.isClosed()) dbConn.rollback(); - } catch (SQLException e) { - LOG.warn("Failed to rollback db connection ", e); - } - } - - void closeDbConn(JDOConnection jdoConn) { - try { - if (jdoConn != null) { - jdoConn.close(); - } - } catch (Exception e) { - LOG.warn("Failed to close db connection", e); - } - } - - void closeStmt(Statement stmt) { - try { - if (stmt != null && !stmt.isClosed()) stmt.close(); - } catch (SQLException e) { - LOG.warn("Failed to close statement ", e); - } - } - - void close(ResultSet rs) { - try { - if (rs != null && !rs.isClosed()) { - rs.close(); - } - } - catch(SQLException ex) { - LOG.warn("Failed to close statement ", ex); - } - } - - static String quoteString(String input) { - return "'" + input + "'"; - } - - void close(ResultSet rs, Statement stmt, JDOConnection dbConn) { - close(rs); - closeStmt(stmt); - closeDbConn(dbConn); - } - - private void populateInsertUpdateMap(Map statsPartInfoMap, - Map updateMap, - MapinsertMap, - Connection dbConn, Table tbl) throws SQLException, MetaException, NoSuchObjectException { - StringBuilder prefix = new StringBuilder(); - StringBuilder suffix = new StringBuilder(); - Statement statement = null; - ResultSet rs = null; - List queries = new ArrayList<>(); - Set selectedParts = new HashSet<>(); - - List partIdList = statsPartInfoMap.keySet().stream().map( - e -> e.partitionId).collect(Collectors.toList() - ); - - prefix.append("select \"PART_ID\", \"COLUMN_NAME\" from \"PART_COL_STATS\" WHERE "); - TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, - partIdList, "\"PART_ID\"", true, false); - - for (String query : queries) { - try { - statement = dbConn.createStatement(); - LOG.debug("Going to execute query " + query); - rs = statement.executeQuery(query); - while (rs.next()) { - selectedParts.add(new PartColNameInfo(rs.getLong(1), rs.getString(2))); - } - } finally { - close(rs, statement, null); - } - } - - for (Map.Entry entry : statsPartInfoMap.entrySet()) { - PartitionInfo partitionInfo = (PartitionInfo) entry.getKey(); - ColumnStatistics colStats = (ColumnStatistics) entry.getValue(); - long partId = partitionInfo.partitionId; - ColumnStatisticsDesc statsDesc = colStats.getStatsDesc(); - if (!statsDesc.isSetCatName()) { - statsDesc.setCatName(tbl.getCatName()); - } - for (ColumnStatisticsObj statisticsObj : colStats.getStatsObj()) { - PartColNameInfo temp = new PartColNameInfo(partId, statisticsObj.getColName()); - if (selectedParts.contains(temp)) { - updateMap.put(temp, StatObjectConverter. - convertToMPartitionColumnStatistics(null, statsDesc, statisticsObj, colStats.getEngine())); - } else { - insertMap.put(temp, StatObjectConverter. 
- convertToMPartitionColumnStatistics(null, statsDesc, statisticsObj, colStats.getEngine())); - } - } - } - } - - private void updatePartColStatTable(Map updateMap, - Connection dbConn) throws SQLException, MetaException, NoSuchObjectException { - PreparedStatement pst = null; - for (Map.Entry entry : updateMap.entrySet()) { - PartColNameInfo partColNameInfo = (PartColNameInfo) entry.getKey(); - Long partId = partColNameInfo.partitionId; - MPartitionColumnStatistics mPartitionColumnStatistics = (MPartitionColumnStatistics) entry.getValue(); - String update = "UPDATE \"PART_COL_STATS\" SET "; - update += StatObjectConverter.getUpdatedColumnSql(mPartitionColumnStatistics); - update += " WHERE \"PART_ID\" = " + partId + " AND " - + " \"COLUMN_NAME\" = " + quoteString(mPartitionColumnStatistics.getColName()); - try { - pst = dbConn.prepareStatement(update); - StatObjectConverter.initUpdatedColumnStatement(mPartitionColumnStatistics, pst); - LOG.debug("Going to execute update " + update); - int numUpdate = pst.executeUpdate(); - if (numUpdate != 1) { - throw new MetaException("Invalid state of PART_COL_STATS for PART_ID " + partId); - } - } finally { - closeStmt(pst); - } - } - } - - private void insertIntoPartColStatTable(Map insertMap, - long maxCsId, - Connection dbConn) throws SQLException, MetaException, NoSuchObjectException { - PreparedStatement preparedStatement = null; - int numRows = 0; - String insert = "INSERT INTO \"PART_COL_STATS\" (\"CS_ID\", \"CAT_NAME\", \"DB_NAME\"," - + "\"TABLE_NAME\", \"PARTITION_NAME\", \"COLUMN_NAME\", \"COLUMN_TYPE\", \"PART_ID\"," - + " \"LONG_LOW_VALUE\", \"LONG_HIGH_VALUE\", \"DOUBLE_HIGH_VALUE\", \"DOUBLE_LOW_VALUE\"," - + " \"BIG_DECIMAL_LOW_VALUE\", \"BIG_DECIMAL_HIGH_VALUE\", \"NUM_NULLS\", \"NUM_DISTINCTS\", \"BIT_VECTOR\" ," - + " \"HISTOGRAM\", \"AVG_COL_LEN\", \"MAX_COL_LEN\", \"NUM_TRUES\", \"NUM_FALSES\", \"LAST_ANALYZED\", \"ENGINE\") values " - + "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; - - try { - preparedStatement = dbConn.prepareStatement(insert); - for (Map.Entry entry : insertMap.entrySet()) { - PartColNameInfo partColNameInfo = (PartColNameInfo) entry.getKey(); - Long partId = partColNameInfo.partitionId; - MPartitionColumnStatistics mPartitionColumnStatistics = (MPartitionColumnStatistics) entry.getValue(); - - preparedStatement.setLong(1, maxCsId); - preparedStatement.setString(2, mPartitionColumnStatistics.getCatName()); - preparedStatement.setString(3, mPartitionColumnStatistics.getDbName()); - preparedStatement.setString(4, mPartitionColumnStatistics.getTableName()); - preparedStatement.setString(5, mPartitionColumnStatistics.getPartitionName()); - preparedStatement.setString(6, mPartitionColumnStatistics.getColName()); - preparedStatement.setString(7, mPartitionColumnStatistics.getColType()); - preparedStatement.setLong(8, partId); - preparedStatement.setObject(9, mPartitionColumnStatistics.getLongLowValue()); - preparedStatement.setObject(10, mPartitionColumnStatistics.getLongHighValue()); - preparedStatement.setObject(11, mPartitionColumnStatistics.getDoubleHighValue()); - preparedStatement.setObject(12, mPartitionColumnStatistics.getDoubleLowValue()); - preparedStatement.setString(13, mPartitionColumnStatistics.getDecimalLowValue()); - preparedStatement.setString(14, mPartitionColumnStatistics.getDecimalHighValue()); - preparedStatement.setObject(15, mPartitionColumnStatistics.getNumNulls()); - preparedStatement.setObject(16, mPartitionColumnStatistics.getNumDVs()); - 
preparedStatement.setObject(17, mPartitionColumnStatistics.getBitVector()); - preparedStatement.setBytes(18, mPartitionColumnStatistics.getHistogram()); - preparedStatement.setObject(19, mPartitionColumnStatistics.getAvgColLen()); - preparedStatement.setObject(20, mPartitionColumnStatistics.getMaxColLen()); - preparedStatement.setObject(21, mPartitionColumnStatistics.getNumTrues()); - preparedStatement.setObject(22, mPartitionColumnStatistics.getNumFalses()); - preparedStatement.setLong(23, mPartitionColumnStatistics.getLastAnalyzed()); - preparedStatement.setString(24, mPartitionColumnStatistics.getEngine()); - - maxCsId++; - numRows++; - preparedStatement.addBatch(); - if (numRows == maxBatchSize) { - preparedStatement.executeBatch(); - numRows = 0; - } - } - - if (numRows != 0) { - preparedStatement.executeBatch(); - } - } finally { - closeStmt(preparedStatement); - } - } - - private Map getParamValues(Connection dbConn, List partIdList) throws SQLException { - List queries = new ArrayList<>(); - StringBuilder prefix = new StringBuilder(); - StringBuilder suffix = new StringBuilder(); - Statement statement = null; - ResultSet rs = null; - - prefix.append("select \"PART_ID\", \"PARAM_VALUE\" " - + " from \"PARTITION_PARAMS\" where " - + " \"PARAM_KEY\" = 'COLUMN_STATS_ACCURATE' " - + " and "); - TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, - partIdList, "\"PART_ID\"", true, false); - - Map partIdToParaMap = new HashMap<>(); - for (String query : queries) { - try { - statement = dbConn.createStatement(); - LOG.debug("Going to execute query " + query); - rs = statement.executeQuery(query); - while (rs.next()) { - partIdToParaMap.put(rs.getLong(1), rs.getString(2)); - } - } finally { - close(rs, statement, null); - } - } - return partIdToParaMap; - } - - private void updateWriteIdForPartitions(Connection dbConn, long writeId, List partIdList) throws SQLException { - StringBuilder prefix = new StringBuilder(); - List queries = new ArrayList<>(); - StringBuilder suffix = new StringBuilder(); - - prefix.append("UPDATE \"PARTITIONS\" set \"WRITE_ID\" = " + writeId + " where "); - TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, - partIdList, "\"PART_ID\"", false, false); - - Statement statement = null; - for (String query : queries) { - try { - statement = dbConn.createStatement(); - LOG.debug("Going to execute update " + query); - statement.executeUpdate(query); - } finally { - closeStmt(statement); - } - } - } - - private Map> updatePartitionParamTable(Connection dbConn, - Map partitionInfoMap, - String validWriteIds, - long writeId, - boolean isAcidTable) - throws SQLException, MetaException { - Map> result = new HashMap<>(); - boolean areTxnStatsSupported = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_TXN_STATS_ENABLED); - PreparedStatement statementInsert = null; - PreparedStatement statementDelete = null; - PreparedStatement statementUpdate = null; - String insert = "INSERT INTO \"PARTITION_PARAMS\" (\"PART_ID\", \"PARAM_KEY\", \"PARAM_VALUE\") " - + "VALUES( ? , 'COLUMN_STATS_ACCURATE' , ? )"; - String delete = "DELETE from \"PARTITION_PARAMS\" " - + " where \"PART_ID\" = ? " - + " and \"PARAM_KEY\" = 'COLUMN_STATS_ACCURATE'"; - String update = "UPDATE \"PARTITION_PARAMS\" set \"PARAM_VALUE\" = ? " - + " where \"PART_ID\" = ? 
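The insert path above accumulates rows with addBatch() and flushes with executeBatch() whenever maxBatchSize rows are pending, plus a final flush for the remainder; updatePartitionParamTable batches its inserts, updates, and deletes the same way. The same pattern in isolation, against a hypothetical two-column EXAMPLE_STATS table and an assumed open Connection:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;
    import java.util.List;

    class BatchInsertSketch {
        static void insertAll(Connection conn, List<long[]> rows, int maxBatchSize) throws SQLException {
            String sql = "INSERT INTO \"EXAMPLE_STATS\" (\"ID\", \"VALUE\") VALUES (?, ?)";
            try (PreparedStatement ps = conn.prepareStatement(sql)) {
                int pending = 0;
                for (long[] row : rows) {
                    ps.setLong(1, row[0]);
                    ps.setLong(2, row[1]);
                    ps.addBatch();
                    if (++pending == maxBatchSize) {   // flush a full batch
                        ps.executeBatch();
                        pending = 0;
                    }
                }
                if (pending != 0) {                    // flush the remainder
                    ps.executeBatch();
                }
            }
        }
    }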
" - + " and \"PARAM_KEY\" = 'COLUMN_STATS_ACCURATE'"; - int numInsert = 0; - int numDelete = 0; - int numUpdate = 0; - - List partIdList = partitionInfoMap.keySet().stream().map( - e -> e.partitionId).collect(Collectors.toList() - ); - - // get the old parameters from PARTITION_PARAMS table. - Map partIdToParaMap = getParamValues(dbConn, partIdList); - - try { - statementInsert = dbConn.prepareStatement(insert); - statementDelete = dbConn.prepareStatement(delete); - statementUpdate = dbConn.prepareStatement(update); - for (Map.Entry entry : partitionInfoMap.entrySet()) { - PartitionInfo partitionInfo = (PartitionInfo) entry.getKey(); - ColumnStatistics colStats = (ColumnStatistics) entry.getValue(); - List colNames = colStats.getStatsObj().stream().map(e -> e.getColName()).collect(Collectors.toList()); - long partWriteId = partitionInfo.writeId; - long partId = partitionInfo.partitionId; - Map newParameter; - - if (!partIdToParaMap.containsKey(partId)) { - newParameter = new HashMap<>(); - newParameter.put(COLUMN_STATS_ACCURATE, "TRUE"); - StatsSetupConst.setColumnStatsState(newParameter, colNames); - statementInsert.setLong(1, partId); - statementInsert.setString(2, newParameter.get(COLUMN_STATS_ACCURATE)); - numInsert++; - statementInsert.addBatch(); - if (numInsert == maxBatchSize) { - LOG.debug(" Executing insert " + insert); - statementInsert.executeBatch(); - numInsert = 0; - } - } else { - String oldStats = partIdToParaMap.get(partId); - - Map oldParameter = new HashMap<>(); - oldParameter.put(COLUMN_STATS_ACCURATE, oldStats); - - newParameter = new HashMap<>(); - newParameter.put(COLUMN_STATS_ACCURATE, oldStats); - StatsSetupConst.setColumnStatsState(newParameter, colNames); - - if (isAcidTable) { - String errorMsg = ObjectStore.verifyStatsChangeCtx( - colStats.getStatsDesc().getDbName() + "." + colStats.getStatsDesc().getTableName(), - oldParameter, newParameter, writeId, validWriteIds, true); - if (errorMsg != null) { - throw new MetaException(errorMsg); - } - } - - if (isAcidTable && - (!areTxnStatsSupported || !ObjectStore.isCurrentStatsValidForTheQuery(oldParameter, partWriteId, - validWriteIds, true))) { - statementDelete.setLong(1, partId); - statementDelete.addBatch(); - numDelete++; - if (numDelete == maxBatchSize) { - statementDelete.executeBatch(); - numDelete = 0; - LOG.debug("Removed COLUMN_STATS_ACCURATE from the parameters of the partition " - + colStats.getStatsDesc().getDbName() + "." + colStats.getStatsDesc().getTableName() + "." 
- + colStats.getStatsDesc().getPartName()); - } - } else { - statementUpdate.setString(1, newParameter.get(COLUMN_STATS_ACCURATE)); - statementUpdate.setLong(2, partId); - statementUpdate.addBatch(); - numUpdate++; - if (numUpdate == maxBatchSize) { - LOG.debug(" Executing update " + statementUpdate); - statementUpdate.executeBatch(); - numUpdate = 0; - } - } - } - result.put(partitionInfo.partitionName, newParameter); - } - - if (numInsert != 0) { - statementInsert.executeBatch(); - } - - if (numUpdate != 0) { - statementUpdate.executeBatch(); - } - - if (numDelete != 0) { - statementDelete.executeBatch(); - } - - if (isAcidTable) { - updateWriteIdForPartitions(dbConn, writeId, partIdList); - } - return result; - } finally { - closeStmt(statementInsert); - closeStmt(statementUpdate); - closeStmt(statementDelete); - } - } - - private static class PartitionInfo { - long partitionId; - long writeId; - String partitionName; - public PartitionInfo(long partitionId, long writeId, String partitionName) { - this.partitionId = partitionId; - this.writeId = writeId; - this.partitionName = partitionName; - } - - @Override - public int hashCode() { - return (int)partitionId; - } - - @Override - public boolean equals(Object o) - { - if (this == o) { - return true; - } - if (o == null) { - return false; - } - if (!(o instanceof PartitionInfo)) { - return false; - } - PartitionInfo other = (PartitionInfo)o; - if (this.partitionId != other.partitionId) { - return false; - } - return true; - } - } - - private static class PartColNameInfo { - long partitionId; - String colName; - public PartColNameInfo(long partitionId, String colName) { - this.partitionId = partitionId; - this.colName = colName; - } - - @Override - public int hashCode() { - return (int)partitionId; - } - - @Override - public boolean equals(Object o) - { - if (this == o) { - return true; - } - if (o == null) { - return false; - } - if (!(o instanceof PartColNameInfo)) { - return false; - } - PartColNameInfo other = (PartColNameInfo)o; - if (this.partitionId != other.partitionId) { - return false; - } - if (this.colName.equalsIgnoreCase(other.colName)) { - return true; - } - return false; - } - } - - private Map getPartitionInfo(Connection dbConn, long tblId, - Map partColStatsMap) - throws SQLException, MetaException { - List queries = new ArrayList<>(); - StringBuilder prefix = new StringBuilder(); - StringBuilder suffix = new StringBuilder(); - Statement statement = null; - ResultSet rs = null; - Map partitionInfoMap = new HashMap<>(); - - List partKeys = partColStatsMap.keySet().stream().map( - e -> quoteString(e)).collect(Collectors.toList() - ); - - prefix.append("select \"PART_ID\", \"WRITE_ID\", \"PART_NAME\" from \"PARTITIONS\" where "); - suffix.append(" and \"TBL_ID\" = " + tblId); - TxnUtils.buildQueryWithINClauseStrings(conf, queries, prefix, suffix, - partKeys, "\"PART_NAME\"", true, false); - - for (String query : queries) { - // Select for update makes sure that the partitions are not modified while the stats are getting updated. 
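As the comment above notes, the partition lookup appends a FOR UPDATE clause so the selected PARTITIONS rows stay locked until the surrounding transaction commits, and setAnsiQuotes switches MySQL to ANSI_QUOTES so the double-quoted identifiers used in these statements parse. Below is a condensed sketch of that locking read, assuming an open, non-auto-commit Connection and a dialect where a trailing FOR UPDATE is legal; the class and method names are invented for illustration.

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;
    import java.util.HashMap;
    import java.util.Map;

    class SelectForUpdateSketch {
        // Lock the matching PARTITIONS rows for the rest of the transaction and read their write ids.
        static Map<Long, Long> lockAndReadWriteIds(Connection conn, long tblId) throws SQLException {
            String query = "select \"PART_ID\", \"WRITE_ID\" from \"PARTITIONS\""
                + " where \"TBL_ID\" = " + tblId + " for update";
            Map<Long, Long> writeIds = new HashMap<>();
            try (Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery(query)) {
                while (rs.next()) {
                    writeIds.put(rs.getLong(1), rs.getLong(2));
                }
            }
            return writeIds;   // rows remain locked until commit() or rollback()
        }
    }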
- query = sqlGenerator.addForUpdateClause(query); - try { - statement = dbConn.createStatement(); - LOG.debug("Going to execute query <" + query + ">"); - rs = statement.executeQuery(query); - while (rs.next()) { - PartitionInfo partitionInfo = new PartitionInfo(rs.getLong(1), - rs.getLong(2), rs.getString(3)); - partitionInfoMap.put(partitionInfo, partColStatsMap.get(rs.getString(3))); - } - } finally { - close(rs, statement, null); - } - } - return partitionInfoMap; - } - - private void setAnsiQuotes(Connection dbConn) throws SQLException { - if (sqlGenerator.getDbProduct().isMYSQL()) { - try (Statement stmt = dbConn.createStatement()) { - stmt.execute("SET @@session.sql_mode=ANSI_QUOTES"); - } - } - } - - /** - * Update the statistics for the given partitions. Add the notification logs also. - * @return map of partition key to column stats if successful, null otherwise. - */ - public Map> updatePartitionColumnStatistics(Map partColStatsMap, - Table tbl, long csId, - String validWriteIds, long writeId, - List transactionalListeners) - throws MetaException { - JDOConnection jdoConn = null; - Connection dbConn = null; - boolean committed = false; - try { - lockInternal(); - jdoConn = pm.getDataStoreConnection(); - dbConn = (Connection) (jdoConn.getNativeConnection()); - - setAnsiQuotes(dbConn); - - Map partitionInfoMap = getPartitionInfo(dbConn, tbl.getId(), partColStatsMap); - - Map> result = - updatePartitionParamTable(dbConn, partitionInfoMap, validWriteIds, writeId, TxnUtils.isAcidTable(tbl)); - - Map insertMap = new HashMap<>(); - Map updateMap = new HashMap<>(); - populateInsertUpdateMap(partitionInfoMap, updateMap, insertMap, dbConn, tbl); - - LOG.info("Number of stats to insert " + insertMap.size() + " update " + updateMap.size()); - - if (insertMap.size() != 0) { - insertIntoPartColStatTable(insertMap, csId, dbConn); - } - - if (updateMap.size() != 0) { - updatePartColStatTable(updateMap, dbConn); - } - - if (transactionalListeners != null) { - UpdatePartitionColumnStatEventBatch eventBatch = new UpdatePartitionColumnStatEventBatch(null); - for (Map.Entry entry : result.entrySet()) { - Map parameters = (Map) entry.getValue(); - ColumnStatistics colStats = partColStatsMap.get(entry.getKey()); - List partVals = getPartValsFromName(tbl, colStats.getStatsDesc().getPartName()); - UpdatePartitionColumnStatEvent event = new UpdatePartitionColumnStatEvent(colStats, partVals, parameters, - tbl, writeId, null); - eventBatch.addPartColStatEvent(event); - } - MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners, - EventMessage.EventType.UPDATE_PARTITION_COLUMN_STAT_BATCH, eventBatch, dbConn, sqlGenerator); - } - dbConn.commit(); - committed = true; - return result; - } catch (Exception e) { - LOG.error("Unable to update Column stats for " + tbl.getTableName(), e); - throw new MetaException("Unable to update Column stats for " + tbl.getTableName() - + " due to: " + e.getMessage()); - } finally { - if (!committed) { - rollbackDBConn(dbConn); - } - closeDbConn(jdoConn); - unlockInternal(); - } - } - - /** - * Gets the next CS id from sequence MPartitionColumnStatistics and increment the CS id by numStats. - * @return The CD id before update. 
- */ - public long getNextCSIdForMPartitionColumnStatistics(long numStats) throws MetaException { - Statement statement = null; - ResultSet rs = null; - long maxCsId = 0; - boolean committed = false; - Connection dbConn = null; - JDOConnection jdoConn = null; - - try { - lockInternal(); - jdoConn = pm.getDataStoreConnection(); - dbConn = (Connection) (jdoConn.getNativeConnection()); - - setAnsiQuotes(dbConn); - - // This loop will be iterated at max twice. If there is no records, it will first insert and then do a select. - // We are not using any upsert operations as select for update and then update is required to make sure that - // the caller gets a reserved range for CSId not used by any other thread. - boolean insertDone = false; - while (maxCsId == 0) { - String query = sqlGenerator.addForUpdateClause("SELECT \"NEXT_VAL\" FROM \"SEQUENCE_TABLE\" " - + "WHERE \"SEQUENCE_NAME\"= " - + quoteString("org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics")); - LOG.debug("Going to execute query " + query); - statement = dbConn.createStatement(); - rs = statement.executeQuery(query); - if (rs.next()) { - maxCsId = rs.getLong(1); - } else if (insertDone) { - throw new MetaException("Invalid state of SEQUENCE_TABLE for MPartitionColumnStatistics"); - } else { - insertDone = true; - closeStmt(statement); - statement = dbConn.createStatement(); - query = "INSERT INTO \"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\", \"NEXT_VAL\") VALUES ( " - + quoteString("org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics") + "," + 1 - + ")"; - try { - statement.executeUpdate(query); - } catch (SQLException e) { - // If the record is already inserted by some other thread continue to select. - if (dbType.isDuplicateKeyError(e)) { - continue; - } - LOG.error("Unable to insert into SEQUENCE_TABLE for MPartitionColumnStatistics.", e); - throw e; - } finally { - closeStmt(statement); - } - } - } - - long nextMaxCsId = maxCsId + numStats + 1; - closeStmt(statement); - statement = dbConn.createStatement(); - String query = "UPDATE \"SEQUENCE_TABLE\" SET \"NEXT_VAL\" = " - + nextMaxCsId - + " WHERE \"SEQUENCE_NAME\" = " - + quoteString("org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics"); - statement.executeUpdate(query); - dbConn.commit(); - committed = true; - return maxCsId; - } catch (Exception e) { - LOG.error("Unable to getNextCSIdForMPartitionColumnStatistics", e); - throw new MetaException("Unable to getNextCSIdForMPartitionColumnStatistics " - + " due to: " + e.getMessage()); - } finally { - if (!committed) { - rollbackDBConn(dbConn); - } - close(rs, statement, jdoConn); - unlockInternal(); - } - } -} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java index 844ebf0031d0..cd50a3480ee1 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java @@ -48,8 +48,6 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.dataconnector.DataConnectorProviderFactory; import org.apache.hadoop.hive.metastore.events.*; -import org.apache.hadoop.hive.metastore.leader.HouseKeepingTasks; -import org.apache.hadoop.hive.metastore.leader.LeaderElectionContext; import 
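The removed getNextCSIdForMPartitionColumnStatistics reserves a contiguous block of CS ids: it reads SEQUENCE_TABLE under a FOR UPDATE lock, seeds the row on first use, bumps NEXT_VAL by the number of statistics, and returns the old value as the start of the reserved range. A compact sketch of that reservation loop, assuming an open non-auto-commit Connection and a dialect that accepts a trailing FOR UPDATE; the real method additionally tolerates a concurrent insert by catching the duplicate-key error and re-selecting, and the names below are hypothetical.

    import java.sql.Connection;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    class SequenceRangeSketch {
        // Reserve [start, start + count) from a single-row sequence table; caller commits.
        static long reserve(Connection conn, String sequenceName, long count) throws SQLException {
            long start = 0;
            boolean insertTried = false;
            try (Statement stmt = conn.createStatement()) {
                while (start == 0) {
                    try (ResultSet rs = stmt.executeQuery(
                            "SELECT \"NEXT_VAL\" FROM \"SEQUENCE_TABLE\" WHERE \"SEQUENCE_NAME\" = '"
                            + sequenceName + "' FOR UPDATE")) {
                        if (rs.next()) {
                            start = rs.getLong(1);
                            continue;
                        }
                    }
                    if (insertTried) {
                        throw new SQLException("sequence row missing after insert: " + sequenceName);
                    }
                    insertTried = true;
                    // First use: seed the row, then loop back to the locking select.
                    stmt.executeUpdate("INSERT INTO \"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\", \"NEXT_VAL\")"
                            + " VALUES ('" + sequenceName + "', 1)");
                }
                stmt.executeUpdate("UPDATE \"SEQUENCE_TABLE\" SET \"NEXT_VAL\" = " + (start + count)
                        + " WHERE \"SEQUENCE_NAME\" = '" + sequenceName + "'");
            }
            return start;
        }
    }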
org.apache.hadoop.hive.metastore.messaging.EventMessage; import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType; import org.apache.hadoop.hive.metastore.metrics.Metrics; @@ -62,6 +60,7 @@ import org.apache.hadoop.hive.metastore.properties.PropertyMap; import org.apache.hadoop.hive.metastore.properties.PropertyStore; import org.apache.hadoop.hive.metastore.txn.*; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; import org.apache.hadoop.hive.metastore.utils.FileUtils; import org.apache.hadoop.hive.metastore.utils.FilterUtils; import org.apache.hadoop.hive.metastore.utils.HdfsUtils; @@ -130,11 +129,6 @@ public class HMSHandler extends FacebookBase implements IHMSHandler { private final Configuration conf; // stores datastore (jpox) properties, // right now they come from jpox.properties - // Flag to control that always threads are initialized only once - // instead of multiple times - private final static AtomicBoolean alwaysThreadsInitialized = - new AtomicBoolean(false); - private static String currentUrl; private FileMetadataManager fileMetadataManager; private PartitionExpressionProxy expressionProxy; @@ -151,6 +145,12 @@ public class HMSHandler extends FacebookBase implements IHMSHandler { static final String NO_FILTER_STRING = ""; static final int UNLIMITED_MAX_PARTITIONS = -1; + static final int LOG_SAMPLE_PARTITIONS_MAX_SIZE = 4; + + static final int LOG_SAMPLE_PARTITIONS_HALF_SIZE = 2; + + static final String LOG_SAMPLE_PARTITIONS_SEPARATOR = ","; + private Warehouse wh; // hdfs warehouse private static Striped tablelocks; @@ -370,12 +370,6 @@ public void init() throws MetaException { partitionValidationPattern = Pattern.compile(partitionValidationRegex); } - // We only initialize once the tasks that need to be run periodically. For remote metastore - // these threads are started along with the other housekeeping threads only in the leader - // HMS. 
- if (!HiveMetaStore.isMetaStoreRemote()) { - startAlwaysTaskThreads(conf, this); - } expressionProxy = PartFilterExprUtil.createExpressionProxy(conf); fileMetadataManager = new FileMetadataManager(this.getMS(), conf); @@ -395,20 +389,6 @@ public void init() throws MetaException { dataconnectorFactory = DataConnectorProviderFactory.getInstance(this); } - static void startAlwaysTaskThreads(Configuration conf, IHMSHandler handler) throws MetaException { - if (alwaysThreadsInitialized.compareAndSet(false, true)) { - try { - LeaderElectionContext context = new LeaderElectionContext.ContextBuilder(conf) - .setTType(LeaderElectionContext.TTYPE.ALWAYS_TASKS) - .addListener(new HouseKeepingTasks(conf, false)) - .setHMSHandler(handler).build(); - context.start(); - } catch (Exception e) { - throw newMetaException(e); - } - } - } - /** * * Filter is actually enabled only when the configured filter hook is configured, not default, and @@ -804,7 +784,7 @@ private void startMultiTableFunction(String function, String db, List tb private void startPartitionFunction(String function, String cat, String db, String tbl, List partVals) { startFunction(function, " : tbl=" + - TableName.getQualified(cat, db, tbl) + "[" + join(partVals, ",") + "]"); + TableName.getQualified(cat, db, tbl) + samplePartitionValues(partVals)); } private void startPartitionFunction(String function, String catName, String db, String tbl, @@ -813,6 +793,52 @@ private void startPartitionFunction(String function, String catName, String db, TableName.getQualified(catName, db, tbl) + " partition=" + partName); } + private void startPartitionFunction(String function, String catName, String db, String tbl, int maxParts) { + startFunction(function, " : tbl=" + TableName.getQualified(catName, db, tbl) + ": Max partitions =" + maxParts); + } + + private void startPartitionFunction(String function, String catName, String db, String tbl, int maxParts, + List partVals) { + startFunction(function, " : tbl=" + TableName.getQualified(catName, db, tbl) + ": Max partitions =" + maxParts + + samplePartitionValues(partVals)); + } + + private void startPartitionFunction(String function, String catName, String db, String tbl, int maxParts, + String filter) { + startFunction(function, + " : tbl=" + TableName.getQualified(catName, db, tbl) + ": Filter=" + filter + ": Max partitions =" + + maxParts); + } + + private void startPartitionFunction(String function, String catName, String db, String tbl, int maxParts, + String expression, String defaultPartitionName) { + startFunction(function, " : tbl=" + TableName.getQualified(catName, db, tbl) + ": Expression=" + expression + + ": Default partition name=" + defaultPartitionName + ": Max partitions=" + maxParts); + } + + private String getGroupsCountAndUsername(final String user_name, final List group_names) { + return ". Number of groups= " + (group_names == null ? 0 : group_names.size()) + ", user name= " + user_name; + } + + private String samplePartitionValues(List partVals) { + if (CollectionUtils.isEmpty(partVals)) { + return ": Partitions = []"; + } + StringBuilder sb = new StringBuilder(": Number of Partitions = " + partVals.size()); + sb.append(": Partitions = ["); + if (partVals.size() > LOG_SAMPLE_PARTITIONS_MAX_SIZE) { + // extracting starting 2 values, and ending 2 values + sb.append(join(partVals.subList(0, LOG_SAMPLE_PARTITIONS_HALF_SIZE), LOG_SAMPLE_PARTITIONS_SEPARATOR)); + sb.append(" .... 
"); + sb.append(join(partVals.subList(partVals.size() - LOG_SAMPLE_PARTITIONS_HALF_SIZE, partVals.size()), + LOG_SAMPLE_PARTITIONS_SEPARATOR)); + } else { + sb.append(join(partVals, LOG_SAMPLE_PARTITIONS_SEPARATOR)); + } + sb.append("]"); + return sb.toString(); + } + private void endFunction(String function, boolean successful, Exception e) { endFunction(function, successful, e, null); } @@ -2905,7 +2931,7 @@ private boolean drop_table_core(final RawStore ms, final String catName, final S firePreEvent(new PreDropTableEvent(tbl, deleteData, this)); tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(tbl, deleteData); - if (tbl.getSd().getLocation() != null) { + if (tableDataShouldBeDeleted && tbl.getSd().getLocation() != null) { tblPath = new Path(tbl.getSd().getLocation()); if (!wh.isWritable(tblPath.getParent())) { String target = indexName == null ? "Table" : "Index table"; @@ -4907,6 +4933,7 @@ private boolean drop_partition_common(RawStore ms, String catName, String db_nam Table tbl = null; Partition part = null; boolean mustPurge = false; + boolean tableDataShouldBeDeleted = false; long writeId = 0; Map transactionalListenerResponses = Collections.emptyMap(); @@ -4930,7 +4957,8 @@ private boolean drop_partition_common(RawStore ms, String catName, String db_nam request.setCatName(catName); tbl = get_table_core(request); firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this)); - + + tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(tbl, deleteData); mustPurge = isMustPurge(envContext, tbl); writeId = getWriteId(envContext); @@ -4938,12 +4966,12 @@ private boolean drop_partition_common(RawStore ms, String catName, String db_nam throw new NoSuchObjectException("Partition doesn't exist. " + part_vals); } isArchived = MetaStoreUtils.isArchived(part); - if (isArchived) { + if (tableDataShouldBeDeleted && isArchived) { archiveParentDir = MetaStoreUtils.getOriginalLocation(part); verifyIsWritablePath(archiveParentDir); } - if ((part.getSd() != null) && (part.getSd().getLocation() != null)) { + if (tableDataShouldBeDeleted && (part.getSd() != null) && (part.getSd().getLocation() != null)) { partPath = new Path(part.getSd().getLocation()); verifyIsWritablePath(partPath); } @@ -4963,9 +4991,7 @@ private boolean drop_partition_common(RawStore ms, String catName, String db_nam } finally { if (!success) { ms.rollbackTransaction(); - } else if (checkTableDataShouldBeDeleted(tbl, deleteData) && - (partPath != null || archiveParentDir != null)) { - + } else if (tableDataShouldBeDeleted && (partPath != null || archiveParentDir != null)) { LOG.info(mustPurge ? "dropPartition() will purge " + partPath + " directly, skipping trash." 
: "dropPartition() will move " + partPath + " to trash-directory."); @@ -5095,7 +5121,7 @@ public DropPartitionsResult drop_partitions_req( boolean deleteData = request.isSetDeleteData() && request.isDeleteData(); boolean ignoreProtection = request.isSetIgnoreProtection() && request.isIgnoreProtection(); boolean needResult = !request.isSetNeedResult() || request.isNeedResult(); - + List dirsToDelete = new ArrayList<>(); List archToDelete = new ArrayList<>(); EnvironmentContext envContext = @@ -5105,20 +5131,22 @@ public DropPartitionsResult drop_partitions_req( Table tbl = null; List parts = null; boolean mustPurge = false; + boolean tableDataShouldBeDeleted = false; long writeId = 0; Map transactionalListenerResponses = null; boolean needsCm = false; - + try { ms.openTransaction(); // We need Partition-s for firing events and for result; DN needs MPartition-s to drop. // Great... Maybe we could bypass fetching MPartitions by issuing direct SQL deletes. tbl = get_table_core(catName, dbName, tblName); mustPurge = isMustPurge(envContext, tbl); + tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(tbl, deleteData); writeId = getWriteId(envContext); - int minCount = 0; + boolean hasMissingParts = false; RequestPartsSpec spec = request.getParts(); List partNames = null; @@ -5126,7 +5154,6 @@ public DropPartitionsResult drop_partitions_req( // Dropping by expressions. parts = new ArrayList<>(spec.getExprs().size()); for (DropPartitionsExpr expr : spec.getExprs()) { - ++minCount; // At least one partition per expression, if not ifExists List result = new ArrayList<>(); boolean hasUnknown = ms.getPartitionsByExpr(catName, dbName, tblName, result, new GetPartitionsArgs.GetPartitionsArgsBuilder() @@ -5147,20 +5174,27 @@ public DropPartitionsResult drop_partitions_req( } } } + if (result.isEmpty()) { + hasMissingParts = true; + if (!ifExists) { + // fail-fast for missing partition expr + break; + } + } parts.addAll(result); } } else if (spec.isSetNames()) { partNames = spec.getNames(); - minCount = partNames.size(); parts = ms.getPartitionsByNames(catName, dbName, tblName, new GetPartitionsArgs.GetPartitionsArgsBuilder() .partNames(partNames).skipColumnSchemaForPartition(request.isSkipColumnSchemaForPartition()) .build()); + hasMissingParts = (parts.size() != partNames.size()); } else { throw new MetaException("Partition spec is not set"); } - if ((parts.size() < minCount) && !ifExists) { + if (hasMissingParts && !ifExists) { throw new NoSuchObjectException("Some partitions to drop are missing"); } @@ -5181,14 +5215,12 @@ public DropPartitionsResult drop_partitions_req( if (colNames != null) { partNames.add(FileUtils.makePartName(colNames, part.getValues())); } - // Preserve the old behavior of failing when we cannot write, even w/o deleteData, - // and even if the table is external. That might not make any sense. 
- if (MetaStoreUtils.isArchived(part)) { + if (tableDataShouldBeDeleted && MetaStoreUtils.isArchived(part)) { Path archiveParentDir = MetaStoreUtils.getOriginalLocation(part); verifyIsWritablePath(archiveParentDir); archToDelete.add(archiveParentDir); } - if ((part.getSd() != null) && (part.getSd().getLocation() != null)) { + if (tableDataShouldBeDeleted && (part.getSd() != null) && (part.getSd().getLocation() != null)) { Path partPath = new Path(part.getSd().getLocation()); verifyIsWritablePath(partPath); dirsToDelete.add(new PathAndDepth(partPath, part.getValues().size())); @@ -5212,7 +5244,7 @@ public DropPartitionsResult drop_partitions_req( } finally { if (!success) { ms.rollbackTransaction(); - } else if (checkTableDataShouldBeDeleted(tbl, deleteData)) { + } else if (tableDataShouldBeDeleted) { LOG.info(mustPurge ? "dropPartition() will purge partition-directories directly, skipping trash." : "dropPartition() will move partition-directories to trash-directory."); @@ -5442,8 +5474,9 @@ public Partition get_partition_with_auth(final String db_name, final String user_name, final List group_names) throws TException { String[] parsedDbName = parseDbName(db_name, conf); - startPartitionFunction("get_partition_with_auth", parsedDbName[CAT_NAME], - parsedDbName[DB_NAME], tbl_name, part_vals); + startFunction("get_partition_with_auth", + " : tbl=" + TableName.getQualified(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name) + + samplePartitionValues(part_vals) + getGroupsCountAndUsername(user_name,group_names)); fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); Partition ret = null; Exception ex = null; @@ -5581,7 +5614,7 @@ public List get_partitions_pspec(final String db_name, final Stri String dbName = parsedDbName[DB_NAME]; String tableName = tbl_name.toLowerCase(); - startTableFunction("get_partitions_pspec", catName, dbName, tableName); + startPartitionFunction("get_partitions_pspec", catName, dbName, tableName, max_parts); List partitionSpecs = null; try { @@ -5667,7 +5700,7 @@ private static boolean is_partition_spec_grouping_enabled(Table table) { public List get_partition_names(final String db_name, final String tbl_name, final short max_parts) throws NoSuchObjectException, MetaException { String[] parsedDbName = parseDbName(db_name, conf); - startTableFunction("get_partition_names", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); + startPartitionFunction("get_partition_names", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, max_parts); fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); List ret = null; Exception ex = null; @@ -5692,7 +5725,9 @@ public PartitionValuesResponse get_partition_values(PartitionValuesRequest reque String catName = request.isSetCatName() ? request.getCatName() : getDefaultCatalog(conf); String dbName = request.getDbName(); String tblName = request.getTblName(); - + long maxParts = request.getMaxParts(); + String filter = request.isSetFilter() ? 
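The drop_partition and drop_partitions_req changes above compute tableDataShouldBeDeleted once and only resolve or writability-check partition directories when data will actually be removed, instead of failing on unwritable locations for external tables or deleteData=false drops. The control flow, reduced to a schematic with a hypothetical Part record:

    import java.util.ArrayList;
    import java.util.List;

    class DropPartitionGatingSketch {
        record Part(String name, String location) {}

        // Only collect (and implicitly verify) directories when the drop will delete data.
        static List<String> collectDirsToDelete(List<Part> parts, boolean tableDataShouldBeDeleted) {
            List<String> dirsToDelete = new ArrayList<>();
            if (!tableDataShouldBeDeleted) {
                return dirsToDelete;   // metadata-only drop: never touch or check storage paths
            }
            for (Part part : parts) {
                if (part.location() != null) {
                    dirsToDelete.add(part.location());
                }
            }
            return dirsToDelete;
        }

        public static void main(String[] args) {
            List<Part> parts = List.of(new Part("p=1", "/warehouse/t/p=1"), new Part("p=2", null));
            System.out.println(collectDirsToDelete(parts, true));   // [/warehouse/t/p=1]
            System.out.println(collectDirsToDelete(parts, false));  // []
        }
    }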
request.getFilter() : ""; + startPartitionFunction("get_partition_values", catName, dbName, tblName, (int) maxParts, filter); try { authorizeTableForPartitionMetadata(catName, dbName, tblName); @@ -5724,9 +5759,8 @@ public void alter_partition_with_environment_context(final String dbName, final EnvironmentContext envContext) throws TException { String[] parsedDbName = parseDbName(dbName, conf); - // TODO: this method name is confusing, it actually does full alter (sortof) - rename_partition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, null, newPartition, - envContext, null); + alter_partition_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tableName, null, + newPartition, envContext, null); } @Deprecated @@ -5734,9 +5768,9 @@ public void alter_partition_with_environment_context(final String dbName, public void rename_partition(final String db_name, final String tbl_name, final List part_vals, final Partition new_part) throws TException { - // Call rename_partition without an environment context. + // Call alter_partition_core without an environment context. String[] parsedDbName = parseDbName(db_name, conf); - rename_partition(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals, new_part, + alter_partition_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, part_vals, new_part, null, null); } @@ -5746,12 +5780,12 @@ public RenamePartitionResponse rename_partition_req(RenamePartitionRequest req) context.putToProperties(RENAME_PARTITION_MAKE_COPY, String.valueOf(req.isClonePart())); context.putToProperties(hive_metastoreConstants.TXN_ID, String.valueOf(req.getTxnId())); - rename_partition(req.getCatName(), req.getDbName(), req.getTableName(), req.getPartVals(), + alter_partition_core(req.getCatName(), req.getDbName(), req.getTableName(), req.getPartVals(), req.getNewPart(), context, req.getValidWriteIdList()); return new RenamePartitionResponse(); }; - private void rename_partition(String catName, String db_name, String tbl_name, + private void alter_partition_core(String catName, String db_name, String tbl_name, List part_vals, Partition new_part, EnvironmentContext envContext, String validWriteIds) throws TException { startTableFunction("alter_partition", catName, db_name, tbl_name); @@ -5780,8 +5814,7 @@ private void rename_partition(String catName, String db_name, String tbl_name, Partition oldPart = null; Exception ex = null; try { - Table table = null; - table = getMS().getTable(catName, db_name, tbl_name, null); + Table table = getMS().getTable(catName, db_name, tbl_name, null); firePreEvent(new PreAlterPartitionEvent(db_name, tbl_name, table, part_vals, new_part, this)); if (part_vals != null && !part_vals.isEmpty()) { @@ -5792,8 +5825,6 @@ private void rename_partition(String catName, String db_name, String tbl_name, oldPart = alterHandler.alterPartition(getMS(), wh, catName, db_name, tbl_name, part_vals, new_part, envContext, this, validWriteIds); - // Only fetch the table if we actually have a listener - if (!listeners.isEmpty()) { MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ALTER_PARTITION, @@ -5856,7 +5887,8 @@ private void alter_partitions_with_environment_context(String catName, String db if (LOG.isInfoEnabled()) { for (Partition tmpPart : new_parts) { - LOG.info("New partition values:" + tmpPart.getValues()); + LOG.info("New partition values: catalog: {} database: {} table: {} partition: {}", + catName, db_name, tbl_name, tmpPart.getValues()); } } // all partitions are altered atomically @@ -6551,8 +6583,8 @@ public 
List get_partitions_ps(final String db_name, final String tbl_name, final List part_vals, final short max_parts) throws TException { String[] parsedDbName = parseDbName(db_name, conf); - startPartitionFunction("get_partitions_ps", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], - tbl_name, part_vals); + startPartitionFunction("get_partitions_ps", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, max_parts, + part_vals); List ret = null; Exception ex = null; @@ -6628,6 +6660,7 @@ public GetPartitionsPsWithAuthResponse get_partitions_ps_with_auth_req(GetPartit .skipColumnSchemaForPartition(req.isSkipColumnSchemaForPartition()) .includeParamKeyPattern(req.getIncludeParamKeyPattern()) .excludeParamKeyPattern(req.getExcludeParamKeyPattern()) + .partNames(req.getPartNames()) .build()); GetPartitionsPsWithAuthResponse res = new GetPartitionsPsWithAuthResponse(); res.setPartitions(partitions); @@ -6644,8 +6677,8 @@ public List get_partition_names_ps(final String db_name, final String tbl_name, final List part_vals, final short max_parts) throws TException { String[] parsedDbName = parseDbName(db_name, conf); - startPartitionFunction("get_partitions_names_ps", parsedDbName[CAT_NAME], - parsedDbName[DB_NAME], tbl_name, part_vals); + startPartitionFunction("get_partitions_names_ps", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name, + max_parts, part_vals); fireReadTablePreEvent(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tbl_name); List ret = null; Exception ex = null; @@ -6836,8 +6869,7 @@ public PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsReques String catName = request.isSetCatName() ? request.getCatName().toLowerCase() : getDefaultCatalog(conf); String dbName = request.getDbName().toLowerCase(); String tblName = request.getTblName().toLowerCase(); - startFunction("get_partitions_statistics_req", ": table=" + - TableName.getQualified(catName, dbName, tblName)); + startPartitionFunction("get_partitions_statistics_req", catName, dbName, tblName, request.getPartNames()); PartitionsStatsResult result = null; List lowerCaseColNames = new ArrayList<>(request.getColNames().size()); @@ -7253,8 +7285,8 @@ public List get_part_specs_by_filter(final String dbName, final S throws TException { String[] parsedDbName = parseDbName(dbName, conf); - startTableFunction("get_partitions_by_filter_pspec", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); - + startPartitionFunction("get_partitions_by_filter_pspec", parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName, + maxParts, filter); List partitionSpecs = null; try { Table table = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName); @@ -7319,7 +7351,10 @@ public PartitionsByExprResult get_partitions_by_expr( PartitionsByExprRequest req) throws TException { String dbName = req.getDbName(), tblName = req.getTblName(); String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf); - startTableFunction("get_partitions_by_expr", catName, dbName, tblName); + String expr = req.isSetExpr() ? Arrays.toString((req.getExpr())) : ""; + String defaultPartitionName = req.isSetDefaultPartitionName() ? 
req.getDefaultPartitionName() : ""; + int maxParts = req.getMaxParts(); + startPartitionFunction("get_partitions_by_expr", catName, dbName, tblName, maxParts, expr, defaultPartitionName); fireReadTablePreEvent(catName, dbName, tblName); PartitionsByExprResult ret = null; Exception ex = null; @@ -7352,8 +7387,9 @@ public int get_num_partitions_by_filter(final String dbName, if (parsedDbName[DB_NAME] == null || tblName == null) { throw new MetaException("The DB and table name cannot be null."); } - startTableFunction("get_num_partitions_by_filter", parsedDbName[CAT_NAME], - parsedDbName[DB_NAME], tblName); + startFunction("get_num_partitions_by_filter", + " : tbl=" + TableName.getQualified(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], tblName) + " Filter=" + + filter); int ret = -1; Exception ex = null; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index 34af9d66b147..68b522ff7471 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -18,10 +18,8 @@ package org.apache.hadoop.hive.metastore; import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ArrayListMultimap; import com.google.common.collect.Lists; -import com.google.common.collect.Multimap; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hive.common.AcidMetaDataFile.DataFormat; @@ -64,6 +62,7 @@ import java.net.URI; import java.util.ArrayList; import java.util.Collections; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -353,7 +352,7 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam // also the location field in partition parts = msdb.getPartitions(catName, dbname, name, -1); - Multimap columnStatsNeedUpdated = ArrayListMultimap.create(); + Map, List> partsByCols = new HashMap<>(); for (Partition part : parts) { String oldPartLoc = part.getSd().getLocation(); if (dataWasMoved && oldPartLoc.contains(oldTblLocPath)) { @@ -364,44 +363,57 @@ public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbnam } part.setDbName(newDbName); part.setTableName(newTblName); - List multiColStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, - part.getValues(), part.getSd().getCols(), oldt, part, null, null); - for (ColumnStatistics colStats : multiColStats) { - columnStatsNeedUpdated.put(part, colStats); + partsByCols.computeIfAbsent(part.getSd().getCols(), k -> new ArrayList<>()).add(part); + } + Map> engineToColStats = new HashMap<>(); + if (rename) { + // If this is the table rename, get the partition column statistics first + for (Map.Entry, List> entry : partsByCols.entrySet()) { + List colNames = entry.getKey().stream().map(fs -> fs.getName()).collect(Collectors.toList()); + List partNames = new ArrayList<>(); + for (Partition part : entry.getValue()) { + partNames.add(Warehouse.makePartName(oldt.getPartitionKeys(), part.getValues())); + } + List> colStats = + msdb.getPartitionColumnStatistics(catName, dbname, name, partNames, colNames); + for (List cs : colStats) { + if (cs != null && !cs.isEmpty()) { + String engine = cs.get(0).getEngine(); + 
cs.stream().forEach(stats -> { + stats.getStatsDesc().setDbName(newDbName); + stats.getStatsDesc().setTableName(newTblName); + String partName = stats.getStatsDesc().getPartName(); + engineToColStats.computeIfAbsent(engine, key -> new HashMap<>()).put(partName, stats); + }); + } + } } } // Do not verify stats parameters on a partitioned table. msdb.alterTable(catName, dbname, name, newt, null); + int partitionBatchSize = MetastoreConf.getIntVar(handler.getConf(), + MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX); + String catalogName = catName; // alterPartition is only for changing the partition location in the table rename if (dataWasMoved) { - - int partsToProcess = parts.size(); - int partitionBatchSize = MetastoreConf.getIntVar(handler.getConf(), - MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX); - int batchStart = 0; - while (partsToProcess > 0) { - int batchEnd = Math.min(batchStart + partitionBatchSize, parts.size()); - List partBatch = parts.subList(batchStart, batchEnd); - int partBatchSize = partBatch.size(); - partsToProcess -= partBatchSize; - batchStart += partBatchSize; - List> partValues = new ArrayList<>(partBatchSize); - for (Partition part : partBatch) { - partValues.add(part.getValues()); + Batchable.runBatched(partitionBatchSize, parts, new Batchable() { + @Override + public List run(List input) throws Exception { + msdb.alterPartitions(catalogName, newDbName, newTblName, + input.stream().map(Partition::getValues).collect(Collectors.toList()), + input, newt.getWriteId(), writeIdList); + return Collections.emptyList(); } - msdb.alterPartitions(catName, newDbName, newTblName, partValues, - partBatch, newt.getWriteId(), writeIdList); - } + }); } Deadline.checkTimeout(); - Table table = msdb.getTable(catName, newDbName, newTblName); - MTable mTable = msdb.ensureGetMTable(catName, newDbName, newTblName); - for (Entry partColStats : columnStatsNeedUpdated.entries()) { - ColumnStatistics newPartColStats = partColStats.getValue(); - newPartColStats.getStatsDesc().setDbName(newDbName); - newPartColStats.getStatsDesc().setTableName(newTblName); - msdb.updatePartitionColumnStatistics(table, mTable, newPartColStats, - partColStats.getKey().getValues(), writeIdList, newt.getWriteId()); + if (rename) { + for (Entry> entry : engineToColStats.entrySet()) { + // We will send ALTER_TABLE event after the db change, set listeners to null so that no extra + // event that could pollute the replication will be sent. 
+ msdb.updatePartitionColumnStatisticsInBatch(entry.getValue(), oldt, + null, writeIdList, newt.getWriteId()); + } } } else { msdb.alterTable(catName, dbname, name, newt, writeIdList); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 97956660791a..c453df0ea1b9 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -32,10 +32,8 @@ import static org.apache.hadoop.hive.metastore.ColumnType.VARCHAR_TYPE_NAME; import java.sql.Connection; -import java.sql.Date; import java.sql.SQLException; import java.sql.Statement; -import java.sql.Timestamp; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -95,13 +93,13 @@ import org.apache.hadoop.hive.metastore.model.MConstraint; import org.apache.hadoop.hive.metastore.model.MCreationMetadata; import org.apache.hadoop.hive.metastore.model.MDatabase; -import org.apache.hadoop.hive.metastore.model.MFunction; import org.apache.hadoop.hive.metastore.model.MNotificationLog; import org.apache.hadoop.hive.metastore.model.MNotificationNextId; import org.apache.hadoop.hive.metastore.model.MPartition; import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege; import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics; import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege; +import org.apache.hadoop.hive.metastore.model.MTable; import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics; import org.apache.hadoop.hive.metastore.model.MWMResourcePlan; import org.apache.hadoop.hive.metastore.parser.ExpressionTree; @@ -111,6 +109,7 @@ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator; import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode; import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.ColStatsObjWithSourceInfo; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; @@ -152,6 +151,7 @@ class MetaStoreDirectSql { private final int batchSize; private final boolean convertMapNullsToEmptyStrings; private final String defaultPartName; + private final boolean isTxnStatsEnabled; /** * Whether direct SQL can be used with the current datastore backing {@link #pm}. @@ -160,7 +160,7 @@ class MetaStoreDirectSql { private final boolean isAggregateStatsCacheEnabled; private final ImmutableMap fieldnameToTableName; private AggregateStatsCache aggrStatsCache; - private DirectSqlUpdateStat updateStat; + private DirectSqlUpdatePart directSqlUpdatePart; private DirectSqlInsertPart directSqlInsertPart; /** @@ -203,7 +203,8 @@ public MetaStoreDirectSql(PersistenceManager pm, Configuration conf, String sche batchSize = dbType.needsInBatching() ? 
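In the HiveAlterHandler hunk above, the hand-rolled batch bookkeeping is replaced with Batchable.runBatched, which walks a large partition list in fixed-size slices and invokes a callback per slice. A generic stand-in for that helper pattern follows; it is not the metastore's Batchable API, just an illustration of the same shape.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.Function;

    class RunBatchedSketch {
        // Apply 'work' to consecutive slices of at most batchSize elements, concatenating the results.
        static <I, R> List<R> runBatched(int batchSize, List<I> input, Function<List<I>, List<R>> work) {
            List<R> results = new ArrayList<>();
            for (int from = 0; from < input.size(); from += batchSize) {
                List<I> batch = input.subList(from, Math.min(from + batchSize, input.size()));
                results.addAll(work.apply(batch));
            }
            return results;
        }

        public static void main(String[] args) {
            List<Integer> ids = List.of(1, 2, 3, 4, 5);
            // e.g. alter partitions a few hundred at a time; here the work just echoes each batch.
            System.out.println(runBatched(2, ids, batch -> List.of("altered " + batch)));
        }
    }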
1000 : NO_BATCHING; } this.batchSize = batchSize; - this.updateStat = new DirectSqlUpdateStat(pm, conf, dbType, batchSize); + this.isTxnStatsEnabled = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_TXN_STATS_ENABLED); + this.directSqlUpdatePart = new DirectSqlUpdatePart(pm, conf, dbType, batchSize); ImmutableMap.Builder fieldNameToTableNameBuilder = new ImmutableMap.Builder<>(); @@ -535,6 +536,69 @@ public void addPartitions(List parts, List directSqlInsertPart.addPartitions(parts, partPrivilegesList, partColPrivilegesList); } + /** + * Alter partitions in batch using direct SQL + * @param table the target table + * @param partNames list of partition names + * @param newParts list of new partitions + * @param queryWriteIdList valid write id list + * @return + * @throws MetaException + */ + public List alterPartitions(MTable table, List partNames, + List newParts, String queryWriteIdList) throws MetaException { + List rows = Batchable.runBatched(batchSize, partNames, new Batchable() { + @Override + public List run(List input) throws Exception { + String filter = "" + PARTITIONS + ".\"PART_NAME\" in (" + makeParams(input.size()) + ")"; + List columns = Arrays.asList("\"PART_ID\"", "\"PART_NAME\"", "\"SD_ID\"", "\"WRITE_ID\""); + return getPartitionFieldsViaSqlFilter(table.getDatabase().getCatalogName(), table.getDatabase().getName(), + table.getTableName(), columns, filter, input, Collections.emptyList(), null); + } + }); + Map, Long> partValuesToId = new HashMap<>(); + Map partIdToSdId = new HashMap<>(); + Map partIdToWriteId = new HashMap<>(); + for (Object[] row : rows) { + Long partId = MetastoreDirectSqlUtils.extractSqlLong(row[0]); + Long sdId = MetastoreDirectSqlUtils.extractSqlLong(row[2]); + Long writeId = MetastoreDirectSqlUtils.extractSqlLong(row[3]); + partIdToSdId.put(partId, sdId); + partIdToWriteId.put(partId, writeId); + List partValues = Warehouse.getPartValuesFromPartName((String) row[1]); + partValuesToId.put(partValues, partId); + } + + boolean isTxn = TxnUtils.isTransactionalTable(table.getParameters()); + for (Partition newPart : newParts) { + Long partId = partValuesToId.get(newPart.getValues()); + boolean useOldWriteId = true; + // If transactional, add/update the MUPdaterTransaction + // for the current updater query. + if (isTxn) { + if (!isTxnStatsEnabled) { + StatsSetupConst.setBasicStatsState(newPart.getParameters(), StatsSetupConst.FALSE); + } else if (queryWriteIdList != null && newPart.getWriteId() > 0) { + // Check concurrent INSERT case and set false to the flag. + if (!ObjectStore.isCurrentStatsValidForTheQuery(newPart.getParameters(), + partIdToWriteId.get(partId), queryWriteIdList, true)) { + StatsSetupConst.setBasicStatsState(newPart.getParameters(), StatsSetupConst.FALSE); + LOG.info("Removed COLUMN_STATS_ACCURATE from the parameters of the partition " + + Warehouse.getQualifiedName(newPart) + " will be made persistent."); + } + useOldWriteId = false; + } + } + + if (useOldWriteId) { + newPart.setWriteId(partIdToWriteId.get(partId)); + } + } + + directSqlUpdatePart.alterPartitions(partValuesToId, partIdToSdId, newParts); + return newParts; + } + /** * Get partition names by using direct SQL queries. 
* @param filter filter to use with direct sql @@ -901,6 +965,28 @@ private List getPartitionIdsViaSqlFilter( String catName, String dbName, String tblName, String sqlFilter, List paramsForFilter, List joinsForFilter, Integer max) throws MetaException { + return getPartitionFieldsViaSqlFilter(catName, dbName, tblName, + Arrays.asList("\"PART_ID\""), sqlFilter, paramsForFilter, joinsForFilter, max); + } + + /** + * Get partition fields for the query using direct SQL queries, to avoid bazillion + * queries created by DN retrieving stuff for each object individually. + * @param catName MetaStore catalog name + * @param dbName MetaStore db name + * @param tblName MetaStore table name + * @param partColumns part fields want to get + * @param sqlFilter SQL filter to use. Better be SQL92-compliant. + * @param paramsForFilter params for ?-s in SQL filter text. Params must be in order. + * @param joinsForFilter if the filter needs additional join statement, they must be in + * this list. Better be SQL92-compliant. + * @param max The maximum number of partitions to return. + * @return List of partition objects. + */ + public List getPartitionFieldsViaSqlFilter( + String catName, String dbName, String tblName, List partColumns, String sqlFilter, + List paramsForFilter, List joinsForFilter, Integer max) + throws MetaException { boolean doTrace = LOG.isDebugEnabled(); final String dbNameLcase = dbName.toLowerCase(); final String tblNameLcase = tblName.toLowerCase(); @@ -908,16 +994,17 @@ private List getPartitionIdsViaSqlFilter( // We have to be mindful of order during filtering if we are not returning all partitions. String orderForFilter = (max != null) ? " order by " + MetastoreConf.getVar(conf, ConfVars.PARTITION_ORDER_EXPR) : ""; + String columns = partColumns.stream().map(col -> PARTITIONS + "." + col).collect(Collectors.joining(",")); String queryText = - "select " + PARTITIONS + ".\"PART_ID\" from " + PARTITIONS + "" - + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\" " - + " and " + TBLS + ".\"TBL_NAME\" = ? " - + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " - + " and " + DBS + ".\"NAME\" = ? " - + join(joinsForFilter, ' ') - + " where " + DBS + ".\"CTLG_NAME\" = ? " - + (StringUtils.isBlank(sqlFilter) ? "" : (" and " + sqlFilter)) + orderForFilter; + "select " + columns + " from " + PARTITIONS + "" + + " inner join " + TBLS + " on " + PARTITIONS + ".\"TBL_ID\" = " + TBLS + ".\"TBL_ID\" " + + " and " + TBLS + ".\"TBL_NAME\" = ? " + + " inner join " + DBS + " on " + TBLS + ".\"DB_ID\" = " + DBS + ".\"DB_ID\" " + + " and " + DBS + ".\"NAME\" = ? " + + join(joinsForFilter, ' ') + + " where " + DBS + ".\"CTLG_NAME\" = ? " + + (StringUtils.isBlank(sqlFilter) ? "" : (" and " + sqlFilter)) + orderForFilter; Object[] params = new Object[paramsForFilter.size() + 3]; params[0] = tblNameLcase; params[1] = dbNameLcase; @@ -928,19 +1015,11 @@ private List getPartitionIdsViaSqlFilter( long start = doTrace ? System.nanoTime() : 0; try (QueryWrapper query = new QueryWrapper(pm.newQuery("javax.jdo.query.SQL", queryText))) { - List sqlResult = executeWithArray(query.getInnerQuery(), params, queryText, + List sqlResult = executeWithArray(query.getInnerQuery(), params, queryText, ((max == null) ? -1 : max.intValue())); long queryTime = doTrace ? 
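The new getPartitionFieldsViaSqlFilter generalizes the old PART_ID-only lookup: callers now pass the PARTITIONS columns they want, and the method qualifies each with the table alias and joins them into the select list of the same TBLS/DBS join. A sketch of just the query-text assembly, with the join and filter fragments treated as given strings and a hypothetical class name:

    import java.util.List;
    import java.util.stream.Collectors;

    class PartitionFieldQuerySketch {
        static String buildQuery(List<String> partColumns, String joinsAndFilter) {
            String partitions = "\"PARTITIONS\"";
            String columns = partColumns.stream()
                .map(col -> partitions + "." + col)      // qualify every requested column
                .collect(Collectors.joining(","));
            return "select " + columns + " from " + partitions
                + " inner join \"TBLS\" on " + partitions + ".\"TBL_ID\" = \"TBLS\".\"TBL_ID\""
                + " and \"TBLS\".\"TBL_NAME\" = ?"
                + " inner join \"DBS\" on \"TBLS\".\"DB_ID\" = \"DBS\".\"DB_ID\""
                + " and \"DBS\".\"NAME\" = ? "
                + joinsAndFilter;
        }

        public static void main(String[] args) {
            System.out.println(buildQuery(
                List.of("\"PART_ID\"", "\"PART_NAME\"", "\"SD_ID\"", "\"WRITE_ID\""),
                "where \"DBS\".\"CTLG_NAME\" = ? and \"PARTITIONS\".\"PART_NAME\" in (?)"));
        }
    }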
System.nanoTime() : 0; MetastoreDirectSqlUtils.timingTrace(doTrace, queryText, start, queryTime); - final List result; - if (sqlResult.isEmpty()) { - result = Collections.emptyList(); // no partitions, bail early. - } else { - result = new ArrayList<>(sqlResult.size()); - for (Object fields : sqlResult) { - result.add(MetastoreDirectSqlUtils.extractSqlLong(fields)); - } - } + List result = new ArrayList<>(sqlResult); return result; } } @@ -1323,12 +1402,13 @@ public static FilterType fromClass(Object value){ @Override public void visit(LeafNode node) throws MetaException { int partColCount = partitionKeys.size(); - int partColIndex = node.getPartColIndexForFilter(partitionKeys, filterBuffer); + int partColIndex = LeafNode.getPartColIndexForFilter(node.keyName, partitionKeys, filterBuffer); if (filterBuffer.hasError()) { return; } - String colTypeStr = ColumnType.getTypeName(partitionKeys.get(partColIndex).getType()); + FieldSchema partCol = partitionKeys.get(partColIndex); + String colTypeStr = ColumnType.getTypeName(partCol.getType()); FilterType colType = FilterType.fromType(colTypeStr); if (colType == FilterType.Invalid) { filterBuffer.setError("Filter pushdown not supported for type " + colTypeStr); @@ -1341,29 +1421,33 @@ public void visit(LeafNode node) throws MetaException { return; } + String nodeValue0 = "?"; // if Filter.g does date parsing for quoted strings, we'd need to verify there's no // type mismatch when string col is filtered by a string that looks like date. - if (colType == FilterType.Date && valType == FilterType.String) { - // Filter.g cannot parse a quoted date; try to parse date here too. + if (colType == FilterType.Date) { try { - nodeValue = MetaStoreUtils.convertStringToDate((String)nodeValue); + nodeValue = MetaStoreUtils.normalizeDate((String) nodeValue); valType = FilterType.Date; - } catch (Exception pe) { // do nothing, handled below - types will mismatch + if (dbType.isPOSTGRES() || dbType.isORACLE()) { + nodeValue0 = "date '" + nodeValue + "'"; + nodeValue = null; + } + } catch (Exception e) { // do nothing, handled below - types will mismatch + } + } else if (colType == FilterType.Timestamp) { + if (dbType.isDERBY() || dbType.isMYSQL()) { + filterBuffer.setError("Filter pushdown on timestamp not supported for " + dbType.dbType); + return; + } + try { + MetaStoreUtils.convertStringToTimestamp((String) nodeValue); + valType = FilterType.Timestamp; + if (dbType.isPOSTGRES() || dbType.isORACLE()) { + nodeValue0 = "timestamp '" + nodeValue + "'"; + nodeValue = null; + } + } catch (Exception e) { //nodeValue could be '__HIVE_DEFAULT_PARTITION__' } - } - - if (colType == FilterType.Timestamp && valType == FilterType.String) { - nodeValue = MetaStoreUtils.convertStringToTimestamp((String)nodeValue); - valType = FilterType.Timestamp; - } - - // We format it so we are sure we are getting the right value - if (valType == FilterType.Date) { - // Format - nodeValue = MetaStoreUtils.convertDateToString((Date)nodeValue); - } else if (valType == FilterType.Timestamp) { - //format - nodeValue = MetaStoreUtils.convertTimestampToString((Timestamp) nodeValue); } boolean isDefaultPartition = (valType == FilterType.String) && defaultPartName.equals(nodeValue); @@ -1393,8 +1477,7 @@ public void visit(LeafNode node) throws MetaException { // Build the filter and add parameters linearly; we are traversing leaf nodes LTR. 
String tableValue = "\"FILTER" + partColIndex + "\".\"PART_KEY_VAL\""; - String nodeValue0 = "?"; - if (node.isReverseOrder) { + if (node.isReverseOrder && nodeValue != null) { params.add(nodeValue); } String tableColumn = tableValue; @@ -1424,14 +1507,9 @@ public void visit(LeafNode node) throws MetaException { params.add(catName.toLowerCase()); } tableValue += " then " + tableValue0 + " else null end)"; - - if (valType == FilterType.Date) { - tableValue = dbType.toDate(tableValue); - } else if (valType == FilterType.Timestamp) { - tableValue = dbType.toTimestamp(tableValue); - } } - if (!node.isReverseOrder) { + + if (!node.isReverseOrder && nodeValue != null) { params.add(nodeValue); } @@ -1439,10 +1517,43 @@ public void visit(LeafNode node) throws MetaException { if (node.operator == Operator.LIKE) { nodeValue0 = nodeValue0 + " ESCAPE '\\' "; } + String filter = node.isReverseOrder + ? nodeValue0 + " " + node.operator.getSqlOp() + " " + tableValue + : tableValue + " " + node.operator.getSqlOp() + " " + nodeValue0; + // For equals and not-equals filter, we can add partition name filter to improve performance. + boolean isOpEquals = Operator.isEqualOperator(node.operator); + if (isOpEquals || Operator.isNotEqualOperator(node.operator)) { + Map partKeyToVal = new HashMap<>(); + partKeyToVal.put(partCol.getName(), node.value.toString()); + String escapedNameFragment = Warehouse.makePartName(partKeyToVal, false); + if (colType == FilterType.Date) { + // Some engines like Pig will record both date and time values, in which case we need + // match PART_NAME by like clause. + escapedNameFragment += "%"; + } + if (colType != FilterType.Date && partColCount == 1) { + // Case where partition column type is not date and there is no other partition columns + params.add(escapedNameFragment); + filter += " and " + PARTITIONS + ".\"PART_NAME\"" + (isOpEquals ? " =? " : " !=? "); + } else { + if (partColCount == 1) { + // Case where partition column type is date and there is no other partition columns + params.add(escapedNameFragment); + } else if (partColIndex + 1 == partColCount) { + // Case where the partition column is at the end of the name. + params.add("%/" + escapedNameFragment); + } else if (partColIndex == 0) { + // Case where the partition column is at the beginning of the name. + params.add(escapedNameFragment + "/%"); + } else { + // Case where the partition column is in the middle of the name. + params.add("%/" + escapedNameFragment + "/%"); + } + filter += " and " + PARTITIONS + ".\"PART_NAME\"" + (isOpEquals ? " like ? " : " not like ? "); + } + } - filterBuffer.append(node.isReverseOrder - ? 
"(" + nodeValue0 + " " + node.operator.getSqlOp() + " " + tableValue + ")" - : "(" + tableValue + " " + node.operator.getSqlOp() + " " + nodeValue0 + ")"); + filterBuffer.append("(" + filter + ")"); } } @@ -3056,12 +3167,15 @@ public Map> updatePartitionColumnStatisticsBatch( ColumnStatistics colStats = (ColumnStatistics) entry.getValue(); numStats += colStats.getStatsObjSize(); } - long csId = updateStat.getNextCSIdForMPartitionColumnStatistics(numStats); - return updateStat.updatePartitionColumnStatistics(partColStatsMap, tbl, csId, validWriteIds, writeId, listeners); + long csId = directSqlUpdatePart.getNextCSIdForMPartitionColumnStatistics(numStats); + return directSqlUpdatePart.updatePartitionColumnStatistics(partColStatsMap, tbl, csId, validWriteIds, writeId, listeners); } public List getFunctions(String catName) throws MetaException { List funcIds = getFunctionIds(catName); + if (funcIds.isEmpty()) { + return Collections.emptyList(); // no functions, bail early. + } // Get full objects. For Oracle/etc. do it in batches. return Batchable.runBatched(batchSize, funcIds, new Batchable() { @Override diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDefaultTransformer.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDefaultTransformer.java index ff92ab86d420..89c78aa0af33 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDefaultTransformer.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/MetastoreDefaultTransformer.java @@ -772,7 +772,7 @@ private Database getDbForTable(Table oldTable) throws MetaException { return hmsHandler.get_database_core(oldTable.getCatName(), oldTable.getDbName()); } catch (NoSuchObjectException e) { throw new MetaException( - "Database " + oldTable.getTableName() + " for table " + oldTable.getTableName() + " could not be found"); + "Database " + oldTable.getDbName() + " for table " + oldTable.getTableName() + " could not be found"); } } @@ -895,7 +895,7 @@ private Table validateTablePaths(Table table) throws MetaException { try { db = hmsHandler.get_database_core(table.getCatName(), table.getDbName()); } catch (NoSuchObjectException e) { - throw new MetaException("Database " + table.getTableName() + " for table " + table.getTableName() + " could not be found"); + throw new MetaException("Database " + table.getDbName() + " for table " + table.getTableName() + " could not be found"); } if (TableType.MANAGED_TABLE.name().equals(table.getTableType())) { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java index 79bc4b49d1eb..36d271e15220 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/Msck.java @@ -125,7 +125,7 @@ public void updateExpressionProxy(String proxyClass) throws TException { * @param msckInfo Information about the tables and partitions we want to check for. * @return Returns 0 when execution succeeds and above 0 if it fails. 
*/ - public int repair(MsckInfo msckInfo) { + public int repair(MsckInfo msckInfo) throws TException, MetastoreException, IOException { CheckResult result = null; List repairOutput = new ArrayList<>(); String qualifiedTableName = null; @@ -271,6 +271,7 @@ public int repair(MsckInfo msckInfo) { } catch (Exception e) { LOG.warn("Failed to run metacheck: ", e); success = false; + throw e; } finally { if (result != null) { logResult(result); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java index 436ebd932acc..a810c9cc695f 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -25,10 +25,12 @@ import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; import java.io.IOException; +import java.math.BigInteger; import java.net.InetAddress; import java.net.URI; import java.nio.ByteBuffer; import java.sql.Connection; +import java.sql.ResultSet; import java.sql.SQLException; import java.sql.SQLIntegrityConstraintViolationException; import java.sql.Statement; @@ -255,6 +257,8 @@ import org.apache.hadoop.hive.metastore.properties.CachingPropertyStore; import org.apache.hadoop.hive.metastore.properties.PropertyStore; import org.apache.hadoop.hive.metastore.tools.SQLGenerator; +import org.apache.hadoop.hive.metastore.tools.metatool.IcebergTableMetadataHandler; +import org.apache.hadoop.hive.metastore.tools.metatool.MetadataTableSummary; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.FileUtils; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; @@ -3890,22 +3894,27 @@ public List listPartitionsPsWithAuth(String catName, String db_name, String userName = args.getUserName(); List groupNames = args.getGroupNames(); List part_vals = args.getPart_vals(); + List partNames = args.getPartNames(); + boolean isAcidTable = TxnUtils.isAcidTable(mtbl.getParameters()); boolean getauth = null != userName && null != groupNames && "TRUE".equalsIgnoreCase( mtbl.getParameters().get("PARTITION_LEVEL_PRIVILEGE")); - - if (canTryDirectSQL(part_vals)) { + // When partNames is given, sending to JDO directly. 
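// --- Illustrative sketch (not part of the patch) ---------------------------------
// Refers back to the MetaStoreDirectSql filter change earlier in this patch: for
// equals / not-equals predicates an extra PART_NAME predicate is added, and when the
// table has several partition columns the value is matched with LIKE, with wildcard
// placement depending on where the column sits in the partition name. Escaping is
// simplified here (the real code builds the fragment via Warehouse.makePartName);
// only the wildcard placement is demonstrated.
class PartNamePatternSketch {

  static String likePattern(String nameFragment, int partColIndex, int partColCount) {
    if (partColCount == 1) {
      return nameFragment;                 // the only partition column: exact fragment
    } else if (partColIndex == 0) {
      return nameFragment + "/%";          // first column: name starts with the fragment
    } else if (partColIndex == partColCount - 1) {
      return "%/" + nameFragment;          // last column: name ends with the fragment
    } else {
      return "%/" + nameFragment + "/%";   // middle column: fragment is surrounded
    }
  }

  public static void main(String[] args) {
    // Hypothetical partition spec (year, month, day), filtering on each column in turn.
    System.out.println(likePattern("year=2024", 0, 3)); // year=2024/%
    System.out.println(likePattern("month=3", 1, 3));   // %/month=3/%
    System.out.println(likePattern("day=7", 2, 3));     // %/day=7
  }
}
// ----------------------------------------------------------------------------------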
+ if (canTryDirectSQL(part_vals) && partNames == null) { LOG.info( "Redirecting to directSQL enabled API: db: {} tbl: {} partVals: {}", db_name, tbl_name, part_vals); partitions = getPartitions(catName, db_name, tbl_name, args); } else { - Collection parts = getPartitionPsQueryResults(catName, db_name, tbl_name, - part_vals, max_parts, null); - boolean isAcidTable = TxnUtils.isAcidTable(mtbl.getParameters()); - for (Object o : parts) { - Partition part = convertToPart(catName, db_name, tbl_name, (MPartition) o, isAcidTable, args); - partitions.add(part); + if (partNames != null) { + partitions.addAll(getPartitionsViaOrmFilter(catName, db_name, tbl_name, isAcidTable, args)); + } else { + Collection parts = getPartitionPsQueryResults(catName, db_name, tbl_name, + part_vals, max_parts, null); + for (Object o : parts) { + Partition part = convertToPart(catName, db_name, tbl_name, (MPartition) o, isAcidTable, args); + partitions.add(part); + } } } if (getauth) { @@ -4394,7 +4403,7 @@ protected boolean canUseDirectSql(GetHelper ctx) throws MetaException { protected abstract String describeResult(); protected abstract T getSqlResult(GetHelper ctx) throws MetaException; protected abstract T getJdoResult( - GetHelper ctx) throws MetaException, NoSuchObjectException; + GetHelper ctx) throws MetaException, NoSuchObjectException, InvalidObjectException; public T run(boolean initTable) throws MetaException, NoSuchObjectException { try { @@ -4905,7 +4914,8 @@ private String makeQueryFilterString(String catName, String dbName, Table table, params.put("catName", catName); } - tree.generateJDOFilterFragment(getConf(), params, queryBuilder, table != null ? table.getPartitionKeys() : null); + tree.accept(new ExpressionTree.JDOFilterGenerator(getConf(), + table != null ? table.getPartitionKeys() : null, queryBuilder, params)); if (queryBuilder.hasError()) { assert !isValidatedFilter; LOG.debug("JDO filter pushdown cannot be used: {}", queryBuilder.getErrorMessage()); @@ -4925,7 +4935,7 @@ private String makeQueryFilterString(String catName, String dbName, String tblNa params.put("t1", tblName); params.put("t2", dbName); params.put("t3", catName); - tree.generateJDOFilterFragment(getConf(), params, queryBuilder, partitionKeys); + tree.accept(new ExpressionTree.JDOFilterGenerator(getConf(), partitionKeys, queryBuilder, params)); if (queryBuilder.hasError()) { assert !isValidatedFilter; LOG.debug("JDO filter pushdown cannot be used: {}", queryBuilder.getErrorMessage()); @@ -5089,26 +5099,22 @@ public static String verifyStatsChangeCtx(String fullTableName, Map 0) { return null; // We have txn context. } - String oldVal = oldP == null ? null : oldP.get(StatsSetupConst.COLUMN_STATS_ACCURATE); - String newVal = newP == null ? null : newP.get(StatsSetupConst.COLUMN_STATS_ACCURATE); - // We don't need txn context is that stats state is not being changed. - if (StringUtils.isEmpty(oldVal) && StringUtils.isEmpty(newVal)) { + + if (!StatsSetupConst.areBasicStatsUptoDate(newP)) { + // The validWriteIds can be absent, for example, in case of Impala alter. + // If the new value is invalid, then we don't care, let the alter operation go ahead. return null; } + + String oldVal = oldP == null ? null : oldP.get(StatsSetupConst.COLUMN_STATS_ACCURATE); + String newVal = newP == null ? null : newP.get(StatsSetupConst.COLUMN_STATS_ACCURATE); if (StringUtils.equalsIgnoreCase(oldVal, newVal)) { if (!isColStatsChange) { return null; // No change in col stats or parameters => assume no change. 
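// --- Illustrative sketch (not part of the patch) ---------------------------------
// Condenses the decision being rewritten in verifyStatsChangeCtx here: a
// write-ID/valid-writeId context is only demanded when the new COLUMN_STATS_ACCURATE
// state is up to date and the stats state (or column stats) actually changes.
// Parameter names are hypothetical; the real method inspects parameter maps.
class StatsChangeCheckSketch {

  /** Returns null when the alter may proceed without txn context, else an error hint. */
  static String check(boolean hasTxnContext, boolean newStatsUpToDate,
                      String oldState, String newState, boolean colStatsChange) {
    if (hasTxnContext) {
      return null;                       // valid write-ID list supplied: nothing to verify here
    }
    if (!newStatsUpToDate) {
      return null;                       // new state is already invalid: no protected change
    }
    boolean sameState = (oldState == null ? newState == null : oldState.equalsIgnoreCase(newState));
    if (sameState && !colStatsChange) {
      return null;                       // nothing about the stats state changes
    }
    return "stats state change on a transactional table requires a write ID";
  }

  public static void main(String[] args) {
    System.out.println(check(false, true, "{\"BASIC_STATS\":\"true\"}", "{\"BASIC_STATS\":\"true\"}", true));
    System.out.println(check(false, false, null, null, true)); // null: invalid new state, allowed
  }
}
// ----------------------------------------------------------------------------------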
} - // Col stats change while json stays "valid" implies stats change. If the new value is invalid, - // then we don't care. This is super ugly and idiotic. - // It will all become better when we get rid of JSON and store a flag and write ID per stats. - if (!StatsSetupConst.areBasicStatsUptoDate(newP)) { - return null; - } } + // Some change to the stats state is being made; it can only be made with a write ID. - // Note - we could do this: if (writeId > 0 && (validWriteIds != null || !StatsSetupConst.areBasicStatsUptoDate(newP))) { return null; - // However the only way ID list can be absent is if WriteEntity wasn't generated for the alter, which is a separate bug. return "Cannot change stats state for a transactional table " + fullTableName + " without " + "providing the transactional write state for verification (new write ID " + writeId + ", valid write IDs " + validWriteIds + "; current state " + oldVal + "; new" + @@ -5256,91 +5262,114 @@ public List alterPartitions(String catName, String dbName, String tbl List> part_vals, List newParts, long writeId, String queryWriteIdList) throws InvalidObjectException, MetaException { - boolean success = false; - Exception e = null; List results = new ArrayList<>(newParts.size()); if (newParts.isEmpty()) { return results; } + catName = normalizeIdentifier(catName); + dbName = normalizeIdentifier(dbName); + tblName = normalizeIdentifier(tblName); + + boolean success = false; try { openTransaction(); - MTable table = this.getMTable(catName, dbName, tblName); - if (table == null) { - throw new NoSuchObjectException( - TableName.getQualified(catName, dbName, tblName) + " table not found"); + MTable table = ensureGetMTable(catName, dbName, tblName); + // Validate new parts: StorageDescriptor and SerDeInfo must be set in Partition. 
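// --- Illustrative sketch (not part of the patch) ---------------------------------
// The alterPartitions body below is being routed through a GetListHelper so that a
// hand-written SQL path is tried first and the JDO/ORM path serves as the fallback.
// This is a minimal stand-in for that shape under those assumptions; GetListHelper
// itself carries more state (table init, error handling, metrics) than shown here.
class SqlWithJdoFallbackSketch {

  interface Path<T> { T run() throws Exception; }

  static <T> T run(boolean directSqlEnabled, Path<T> sqlPath, Path<T> jdoPath) throws Exception {
    if (directSqlEnabled) {
      try {
        return sqlPath.run();            // fast path: batched, hand-written SQL
      } catch (Exception e) {
        // swallow and fall through: the ORM path is the portable safety net
      }
    }
    return jdoPath.run();
  }

  public static void main(String[] args) throws Exception {
    String result = run(true,
        () -> { throw new RuntimeException("direct SQL not supported by this backend"); },
        () -> "altered via JDO");
    System.out.println(result);          // altered via JDO
  }
}
// ----------------------------------------------------------------------------------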
+ if (!TableType.VIRTUAL_VIEW.name().equals(table.getTableType())) { + for (Partition newPart : newParts) { + if (!newPart.isSetSd() || !newPart.getSd().isSetSerdeInfo()) { + throw new InvalidObjectException("Partition does not set storageDescriptor or serdeInfo."); + } + } } + if (writeId > 0) { + newParts.forEach(newPart -> newPart.setWriteId(writeId)); + } + + List partCols = convertToFieldSchemas(table.getPartitionKeys()); List partNames = new ArrayList<>(); for (List partVal : part_vals) { - partNames.add( - Warehouse.makePartName(convertToFieldSchemas(table.getPartitionKeys()), partVal) - ); + partNames.add(Warehouse.makePartName(partCols, partVal)); } - catName = normalizeIdentifier(catName); - dbName = normalizeIdentifier(dbName); - tblName = normalizeIdentifier(tblName); - List mPartitionList; - - try (Query query = pm.newQuery(MPartition.class, - "table.tableName == t1 && table.database.name == t2 && t3.contains(partitionName) " + - " && table.database.catalogName == t4")) { - query.declareParameters("java.lang.String t1, java.lang.String t2, java.util.Collection t3, " - + "java.lang.String t4"); - mPartitionList = (List) query.executeWithArray(tblName, dbName, partNames, catName); - pm.retrieveAll(mPartitionList); - - if (mPartitionList.size() > newParts.size()) { - throw new MetaException("Expecting only one partition but more than one partitions are found."); + results = new GetListHelper(catName, dbName, tblName, true, true) { + @Override + protected List getSqlResult(GetHelper> ctx) + throws MetaException { + return directSql.alterPartitions(table, partNames, newParts, queryWriteIdList); } - Map, MPartition> mPartsMap = new HashMap(); - for (MPartition mPartition : mPartitionList) { - mPartsMap.put(mPartition.getValues(), mPartition); + @Override + protected List getJdoResult(GetHelper> ctx) + throws MetaException, InvalidObjectException { + return alterPartitionsViaJdo(table, partNames, newParts, queryWriteIdList); } + }.run(false); - Set oldCds = new HashSet<>(); - Ref oldCdRef = new Ref<>(); - for (Partition tmpPart : newParts) { - if (!tmpPart.getDbName().equalsIgnoreCase(dbName)) { - throw new MetaException("Invalid DB name : " + tmpPart.getDbName()); - } - - if (!tmpPart.getTableName().equalsIgnoreCase(tblName)) { - throw new MetaException("Invalid table name : " + tmpPart.getDbName()); - } - - if (writeId > 0) { - tmpPart.setWriteId(writeId); - } - oldCdRef.t = null; - Partition result = alterPartitionNoTxn(catName, dbName, tblName, mPartsMap.get(tmpPart.getValues()), - tmpPart, queryWriteIdList, oldCdRef, table); - results.add(result); - if (oldCdRef.t != null) { - oldCds.add(oldCdRef.t); - } - } - for (MColumnDescriptor oldCd : oldCds) { - removeUnusedColumnDescriptor(oldCd); - } - } // commit the changes success = commitTransaction(); } catch (Exception exception) { - e = exception; - LOG.error("Alter failed", e); + LOG.error("Alter failed", exception); + throw new MetaException(exception.getMessage()); } finally { if (!success) { rollbackTransaction(); - MetaException metaException = new MetaException( - "The transaction for alter partition did not commit successfully."); - if (e != null) { - metaException.initCause(e); + } + } + return results; + } + + private List alterPartitionsViaJdo(MTable table, List partNames, + List newParts, String queryWriteIdList) + throws MetaException, InvalidObjectException { + String catName = table.getDatabase().getCatalogName(); + String dbName = table.getDatabase().getName(); + String tblName = table.getTableName(); + List 
results = new ArrayList<>(newParts.size()); + List mPartitionList; + + try (QueryWrapper query = new QueryWrapper(pm.newQuery(MPartition.class, + "table.tableName == t1 && table.database.name == t2 && t3.contains(partitionName) " + + " && table.database.catalogName == t4"))) { + query.declareParameters("java.lang.String t1, java.lang.String t2, java.util.Collection t3, " + + "java.lang.String t4"); + mPartitionList = (List) query.executeWithArray(tblName, dbName, partNames, catName); + pm.retrieveAll(mPartitionList); + + if (mPartitionList.size() > newParts.size()) { + throw new MetaException("Expecting only one partition but more than one partitions are found."); + } + + Map, MPartition> mPartsMap = new HashMap(); + for (MPartition mPartition : mPartitionList) { + mPartsMap.put(mPartition.getValues(), mPartition); + } + + Set oldCds = new HashSet<>(); + Ref oldCdRef = new Ref<>(); + for (Partition tmpPart : newParts) { + if (!tmpPart.getDbName().equalsIgnoreCase(dbName)) { + throw new MetaException("Invalid DB name : " + tmpPart.getDbName()); + } + + if (!tmpPart.getTableName().equalsIgnoreCase(tblName)) { + throw new MetaException("Invalid table name : " + tmpPart.getDbName()); + } + + oldCdRef.t = null; + Partition result = alterPartitionNoTxn(catName, dbName, tblName, + mPartsMap.get(tmpPart.getValues()), tmpPart, queryWriteIdList, oldCdRef, table); + results.add(result); + if (oldCdRef.t != null) { + oldCds.add(oldCdRef.t); } - throw metaException; + } + for (MColumnDescriptor oldCd : oldCds) { + removeUnusedColumnDescriptor(oldCd); } } + return results; } @@ -9918,6 +9947,199 @@ public UpdateSerdeURIRetVal updateSerdeURI(URI oldLoc, URI newLoc, String serdeP } } + /** The following APIs + * + * - getMetadataSummary + * + * is used by HiveMetaTool. + */ + + /** + * Using resultSet to read the HMS_SUMMARY table. 
+ * @param catalogFilter the optional catalog name filter + * @param dbFilter the optional database name filter + * @param tableFilter the optional table name filter + * @return MetadataSummary + * @throws SQLException + */ + public List getMetadataSummary(String catalogFilter, String dbFilter, String tableFilter) throws SQLException { + ArrayList metadataTableSummaryList = new ArrayList(); + + ResultSet rs = null; + Statement stmt = null; + // Fetch the metrics from iceberg manifest for iceberg tables in hive + IcebergTableMetadataHandler icebergHandler = null; + Map icebergTableSummaryMap = null; + + try { + icebergHandler = new IcebergTableMetadataHandler(conf); + icebergTableSummaryMap = icebergHandler.getIcebergTables(); + } catch (Exception e) { + LOG.error("Unable to fetch metadata from iceberg manifests", e); + } + + List querySet = sqlGenerator.getCreateQueriesForMetastoreSummary(); + if (querySet == null) { + LOG.warn("Metadata summary has not been implemented for dbtype {}", sqlGenerator.getDbProduct().dbType); + return null; + } + + try { + JDOConnection jdoConn = null; + jdoConn = pm.getDataStoreConnection(); + stmt = ((Connection) jdoConn.getNativeConnection()).createStatement(); + long startTime = System.currentTimeMillis(); + + for (String q: querySet) { + stmt.execute(q); + } + long endTime = System.currentTimeMillis(); + LOG.info("Total query time for generating HMS Summary: {} (ms)", (((long)endTime) - ((long)startTime))); + } catch (SQLException e) { + LOG.error("Exception during computing HMS Summary", e); + throw e; + } + + final String query = sqlGenerator.getSelectQueryForMetastoreSummary(); + try { + String tblName, dbName, ctlgName, tblType, fileType, compressionType, partitionColumn, writeFormatDefault, transactionalProperties; + int colCount, arrayColCount, structColCount, mapColCount; + Integer partitionCnt; + BigInteger totalSize, sizeNumRows, sizeNumFiles; + stmt.setFetchSize(0); + rs = stmt.executeQuery(query); + while (rs.next()) { + tblName = rs.getString("TBL_NAME"); + dbName = rs.getString("NAME"); + ctlgName = rs.getString("CTLG"); + if(ctlgName == null) ctlgName = "null"; + colCount = rs.getInt("TOTAL_COLUMN_COUNT"); + arrayColCount = rs.getInt("ARRAY_COLUMN_COUNT"); + structColCount = rs.getInt("STRUCT_COLUMN_COUNT"); + mapColCount = rs.getInt("MAP_COLUMN_COUNT"); + tblType = rs.getString("TBL_TYPE"); + fileType = rs.getString("SLIB"); + if (fileType != null) fileType = extractFileFormat(fileType); + compressionType = rs.getString("IS_COMPRESSED"); + if (compressionType.equals("0") || compressionType.equals("f")) compressionType = "None"; + partitionColumn = rs.getString("PARTITION_COLUMN"); + int partitionColumnCount = (partitionColumn == null) ? 
0 : (partitionColumn.split(",")).length; + partitionCnt = rs.getInt("PARTITION_CNT"); + + totalSize = BigInteger.valueOf(rs.getLong("TOTAL_SIZE")); + sizeNumRows = BigInteger.valueOf(rs.getLong("NUM_ROWS")); + sizeNumFiles = BigInteger.valueOf(rs.getLong("NUM_FILES")); + + writeFormatDefault = rs.getString("WRITE_FORMAT_DEFAULT"); + if (writeFormatDefault == null) writeFormatDefault = "null"; + transactionalProperties = rs.getString("TRANSACTIONAL_PROPERTIES"); + if (transactionalProperties == null) transactionalProperties = "null"; + + // for iceberg tables, overwrite the metadata by the metadata fetched in HMSSummaryIcebergHandler + if (fileType != null && fileType.equals("iceberg") && icebergHandler.isEnabled() && icebergTableSummaryMap != null) { + // if the new metadata is not null or 0, overwrite the old metadata + MetadataTableSummary icebergTableSummary = icebergTableSummaryMap.get(tblName); + ctlgName = icebergTableSummary.getCtlgName() != null ? icebergTableSummary.getCtlgName() : ctlgName; + dbName = icebergTableSummary.getDbName() != null ? icebergTableSummary.getDbName() : dbName; + colCount = icebergTableSummary.getColCount() != 0 ? icebergTableSummary.getColCount() : colCount; + partitionColumnCount = icebergTableSummary.getPartitionColumnCount() != 0 ? icebergTableSummary.getPartitionColumnCount() : partitionColumnCount; + totalSize = icebergTableSummary.getTotalSize() != null ? icebergTableSummary.getTotalSize() : totalSize; + sizeNumRows = icebergTableSummary.getSizeNumRows() != null ? icebergTableSummary.getSizeNumRows(): sizeNumRows; + sizeNumFiles = icebergTableSummary.getSizeNumFiles() != null ? icebergTableSummary.getSizeNumFiles(): sizeNumFiles; + if (writeFormatDefault.equals("null")){ + fileType = "parquet"; + } + else { + fileType = writeFormatDefault; + } + tblType = "ICEBERG"; + } + + if (tblType.equals("EXTERNAL_TABLE")){ + if (fileType.equals("parquet")){ + tblType = "HIVE_EXTERNAL"; + } + else if (fileType.equals("jdbc")){ + tblType = "JDBC"; + } + else if (fileType.equals("kudu")){ + tblType = "KUDU"; + } + else if (fileType.equals("hbase")){ + tblType = "HBASE"; + } else { + tblType = "HIVE_EXTERNAL"; + } + } + + if (tblType.equals("MANAGED_TABLE")){ + if (transactionalProperties == "insert_only"){ + tblType = "HIVE_ACID_INSERT_ONLY"; + } + else { + tblType = "HIVE_ACID_FULL"; + } + } + + MetadataTableSummary summary = new MetadataTableSummary(ctlgName, dbName, tblName, colCount, + partitionColumnCount, partitionCnt, totalSize, sizeNumRows, sizeNumFiles, tblType, + fileType, compressionType, arrayColCount, structColCount, mapColCount); + metadataTableSummaryList.add(summary); + } + } catch (Exception e) { + String msg = "Runtime exception while running the query " + query; + LOG.error(msg, e); + throw e; + } finally { + if (rs != null) { + rs.close(); + } + if (stmt != null) { + stmt.close(); + } + } + return metadataTableSummaryList; + } + + /** + * Helper method for getMetadataSummary. Extracting the format of the file from the long string. + * @param fileFormat - fileFormat. A long String which indicates the type of the file. + * @return String A short String which indicates the type of the file. 
+ */ + private static String extractFileFormat(String fileFormat) { + String lowerCaseFileFormat = null; + if (fileFormat == null) { + return "NULL"; + } + lowerCaseFileFormat = fileFormat.toLowerCase(); + if (lowerCaseFileFormat.contains("iceberg")) { + fileFormat = "iceberg"; + } else if (lowerCaseFileFormat.contains("parquet")) { + fileFormat = "parquet"; + } else if (lowerCaseFileFormat.contains("orc")) { + fileFormat = "orc"; + } else if (lowerCaseFileFormat.contains("avro")) { + fileFormat = "avro"; + } else if (lowerCaseFileFormat.contains("json")) { + fileFormat = "json"; + } else if (lowerCaseFileFormat.contains("hbase")) { + fileFormat = "hbase"; + } else if (lowerCaseFileFormat.contains("jdbc")) { + fileFormat = "jdbc"; + } else if (lowerCaseFileFormat.contains("kudu")) { + fileFormat = "kudu"; + } else if ((lowerCaseFileFormat.contains("text")) || (lowerCaseFileFormat.contains("lazysimple"))) { + fileFormat = "text"; + } else if (lowerCaseFileFormat.contains("sequence")) { + fileFormat = "sequence"; + } else if (lowerCaseFileFormat.contains("passthrough")) { + fileFormat = "passthrough"; + } else if (lowerCaseFileFormat.contains("opencsv")) { + fileFormat = "openCSV"; + } + return fileFormat; + } + private void writeMTableColumnStatistics(Table table, MTableColumnStatistics mStatsObj, MTableColumnStatistics oldStats) throws MetaException { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/PartitionIterable.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/PartitionIterable.java index 30a7ffcc3f60..26864d0bb958 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/PartitionIterable.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/PartitionIterable.java @@ -164,6 +164,9 @@ public PartitionIterable(Collection ptnsProvided) { * a Hive object and a table object, and a partial partition spec. */ public PartitionIterable(IMetaStoreClient msc, Table table, int batch_size) throws MetastoreException { + if (batch_size < 1) { + throw new MetastoreException("Invalid batch size for partition iterable. Please use a batch size greater than 0"); + } this.currType = Type.LAZY_FETCH_PARTITIONS; this.msc = msc; this.table = table; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java index 5848abd2064a..163c855833e2 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/StatObjectConverter.java @@ -288,11 +288,13 @@ public static String getUpdatedColumnSql(MPartitionColumnStatistics mStatsObj) { if (mStatsObj.getNumNulls() != null) { setStmt.append("\"NUM_NULLS\" = ? ,"); } - setStmt.append("\"ENGINE\" = ? "); + setStmt.append("\"ENGINE\" = ? ,"); + setStmt.append("\"DB_NAME\" = ? ,"); + setStmt.append("\"TABLE_NAME\" = ? 
"); return setStmt.toString(); } - public static void initUpdatedColumnStatement(MPartitionColumnStatistics mStatsObj, + public static int initUpdatedColumnStatement(MPartitionColumnStatistics mStatsObj, PreparedStatement pst) throws SQLException { int colIdx = 1; if (mStatsObj.getAvgColLen() != null) { @@ -339,6 +341,9 @@ public static void initUpdatedColumnStatement(MPartitionColumnStatistics mStatsO pst.setObject(colIdx++, mStatsObj.getNumNulls()); } pst.setString(colIdx++, mStatsObj.getEngine()); + pst.setString(colIdx++, mStatsObj.getDbName()); + pst.setString(colIdx++, mStatsObj.getTableName()); + return colIdx; } public static ColumnStatisticsObj getTableColumnStatisticsObj( diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitCompactionEvent.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitCompactionEvent.java index f728f9a0b11d..265a43d6756d 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitCompactionEvent.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/events/CommitCompactionEvent.java @@ -22,8 +22,7 @@ import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.hive.metastore.IHMSHandler; import org.apache.hadoop.hive.metastore.api.CompactionType; -import org.apache.hadoop.hive.metastore.api.TxnType; -import org.apache.hadoop.hive.metastore.txn.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; /** * CommitCompactionEvent diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/HouseKeepingTasks.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/HouseKeepingTasks.java index 5db4bba3f4e5..3a4414fd0045 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/HouseKeepingTasks.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/HouseKeepingTasks.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hive.metastore.MetastoreTaskThread; import org.apache.hadoop.hive.metastore.ThreadPool; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.txn.service.CompactionHouseKeeperService; import org.apache.hadoop.hive.metastore.utils.JavaUtils; import java.util.ArrayList; @@ -58,9 +59,15 @@ public List getRemoteOnlyTasks() throws Exception { MetastoreConf.ConfVars.METASTORE_HOUSEKEEPING_THREADS_ON)) { return remoteOnlyTasks; } + boolean isCompactorEnabled = MetastoreConf.getBoolVar(configuration, MetastoreConf.ConfVars.COMPACTOR_INITIATOR_ON) + || MetastoreConf.getBoolVar(configuration, MetastoreConf.ConfVars.COMPACTOR_CLEANER_ON); + Collection taskNames = MetastoreConf.getStringCollection(configuration, MetastoreConf.ConfVars.TASK_THREADS_REMOTE_ONLY); for (String taskName : taskNames) { + if (CompactionHouseKeeperService.class.getName().equals(taskName) && !isCompactorEnabled) { + continue; + } MetastoreTaskThread task = JavaUtils.newInstance(JavaUtils.getClass(taskName, MetastoreTaskThread.class)); remoteOnlyTasks.add(task); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/LeaderElectionContext.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/LeaderElectionContext.java index 
3c01d8030ca9..a3652d1c0019 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/LeaderElectionContext.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/LeaderElectionContext.java @@ -42,12 +42,12 @@ public class LeaderElectionContext { * For those tasks which belong to the same type, they will be running in the same leader. */ public enum TTYPE { - HOUSEKEEPING(new TableName(Warehouse.DEFAULT_CATALOG_NAME, "sys", - "metastore_housekeeping_leader"), "housekeeping"), - WORKER(new TableName(Warehouse.DEFAULT_CATALOG_NAME, "sys", - "metastore_worker_leader"), "compactor_worker"), - ALWAYS_TASKS(new TableName(Warehouse.DEFAULT_CATALOG_NAME, "sys", - "metastore_always_tasks_leader"), "always_tasks"); + HOUSEKEEPING(new TableName(Warehouse.DEFAULT_CATALOG_NAME, "__METASTORE_LEADER_ELECTION__", + "metastore_housekeeping"), "housekeeping"), + WORKER(new TableName(Warehouse.DEFAULT_CATALOG_NAME, "__METASTORE_LEADER_ELECTION__", + "metastore_compactor_worker"), "compactor_worker"), + ALWAYS_TASKS(new TableName(Warehouse.DEFAULT_CATALOG_NAME, "__METASTORE_LEADER_ELECTION__", + "metastore_always_tasks"), "always_tasks"); // Mutex of TTYPE, which can be a nonexistent table private final TableName mutex; // Name of TTYPE @@ -127,9 +127,10 @@ public void start() throws Exception { throw new RuntimeException("Error claiming to be leader: " + leaderElection.getName(), e); } }); + daemon.setName("Metastore Election " + leaderElection.getName()); + daemon.setDaemon(true); + if (startAsDaemon) { - daemon.setName("Leader-Election-" + leaderElection.getName()); - daemon.setDaemon(true); daemon.start(); } else { daemon.run(); @@ -154,7 +155,13 @@ public static Object getLeaderMutex(Configuration conf, TTYPE ttype, String serv case "host": return servHost; case "lock": - return ttype.getTableName(); + TableName mutex = ttype.getTableName(); + String namespace = + MetastoreConf.getVar(conf, MetastoreConf.ConfVars.METASTORE_HOUSEKEEPING_LEADER_LOCK_NAMESPACE); + if (StringUtils.isNotEmpty(namespace)) { + return new TableName(mutex.getCat(), namespace, mutex.getTable()); + } + return mutex; default: throw new UnsupportedOperationException(method + " not supported for leader election"); } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/LeaderElectionFactory.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/LeaderElectionFactory.java index 51dce28ac50c..5055ad8a0034 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/LeaderElectionFactory.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/LeaderElectionFactory.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hive.metastore.leader; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; @@ -26,7 +28,7 @@ */ public class LeaderElectionFactory { - public static LeaderElection create(Configuration conf) { + public static LeaderElection create(Configuration conf) throws IOException { String method = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.METASTORE_HOUSEKEEPING_LEADER_ELECTION); switch (method.toLowerCase()) { @@ -35,7 +37,7 @@ public static LeaderElection create(Configuration conf) { case "lock": return new LeaseLeaderElection(); default: - throw new 
UnsupportedOperationException("Do not support " + method + " now"); + throw new UnsupportedOperationException(method + " is not supported for electing the leader"); } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/LeaseLeaderElection.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/LeaseLeaderElection.java index 937174f5d92b..d6ad76dcce9b 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/LeaseLeaderElection.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/leader/LeaseLeaderElection.java @@ -42,6 +42,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; import java.net.InetAddress; import java.util.ArrayList; import java.util.List; @@ -92,9 +93,17 @@ public class LeaseLeaderElection implements LeaderElection { public static final String METASTORE_RENEW_LEASE = "metastore.renew.leader.lease"; private String name; + private String userName; + private String hostName; - private void doWork(LockResponse resp, Configuration conf, + public LeaseLeaderElection() throws IOException { + userName = SecurityUtils.getUser(); + hostName = InetAddress.getLocalHost().getHostName(); + } + + private synchronized void doWork(LockResponse resp, Configuration conf, TableName tableName) throws LeaderException { + long start = System.currentTimeMillis(); lockId = resp.getLockid(); assert resp.getState() == LockState.ACQUIRED || resp.getState() == LockState.WAITING; shutdownWatcher(); @@ -121,6 +130,7 @@ private void doWork(LockResponse resp, Configuration conf, default: throw new IllegalStateException("Unexpected lock state: " + resp.getState()); } + LOG.debug("Spent {}ms to notify the listeners, isLeader: {}", System.currentTimeMillis() - start, isLeader); } private void notifyListener() { @@ -142,13 +152,6 @@ private void notifyListener() { public void tryBeLeader(Configuration conf, TableName table) throws LeaderException { requireNonNull(conf, "conf is null"); requireNonNull(table, "table is null"); - String user, hostName; - try { - user = SecurityUtils.getUser(); - hostName = InetAddress.getLocalHost().getHostName(); - } catch (Exception e) { - throw new LeaderException("Error while getting the username", e); - } if (store == null) { store = TxnUtils.getTxnStore(conf); @@ -165,7 +168,7 @@ public void tryBeLeader(Configuration conf, TableName table) throws LeaderExcept boolean lockable = false; Exception recentException = null; long start = System.currentTimeMillis(); - LockRequest req = new LockRequest(components, user, hostName); + LockRequest req = new LockRequest(components, userName, hostName); int numRetries = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.LOCK_NUMRETRIES); long maxSleep = MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.LOCK_SLEEP_BETWEEN_RETRIES, TimeUnit.MILLISECONDS); @@ -175,6 +178,7 @@ public void tryBeLeader(Configuration conf, TableName table) throws LeaderExcept if (res.getState() == LockState.WAITING || res.getState() == LockState.ACQUIRED) { lockable = true; doWork(res, conf, table); + LOG.debug("Spent {}ms to lock the table {}, retries: {}", System.currentTimeMillis() - start, table, i); break; } } catch (NoSuchTxnException | TxnAbortedException e) { @@ -324,6 +328,7 @@ public void runInternal() { } catch (NoSuchTxnException | TxnAbortedException e) { throw new AssertionError("This should not happen, we didn't open 
txn", e); } catch (NoSuchLockException e) { + LOG.info("No such lock {} for NonLeaderWatcher, try to obtain the lock again...", lockId); reclaim(); } catch (Exception e) { // Wait for next cycle. @@ -379,6 +384,7 @@ public void runInternal() { } catch (NoSuchTxnException | TxnAbortedException e) { throw new AssertionError("This should not happen, we didn't open txn", e); } catch (NoSuchLockException e) { + LOG.info("No such lock {} for Heartbeater, try to obtain the lock again...", lockId); reclaim(); } catch (Exception e) { // Wait for next cycle. @@ -404,6 +410,7 @@ public ReleaseAndRequireWatcher(Configuration conf, super(conf, tableName); timeout = MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.TXN_TIMEOUT, TimeUnit.MILLISECONDS) + 3000; + setName("ReleaseAndRequireWatcher"); } @Override diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/AcidMetricLogger.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/AcidMetricLogger.java index 35450a4fe60d..4fbfced83284 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/AcidMetricLogger.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/AcidMetricLogger.java @@ -23,8 +23,8 @@ import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.txn.CompactionMetricsData; -import org.apache.hadoop.hive.metastore.txn.MetricsInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionMetricsData; +import org.apache.hadoop.hive.metastore.txn.entities.MetricsInfo; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.slf4j.Logger; @@ -33,9 +33,9 @@ import java.util.List; import java.util.concurrent.TimeUnit; -import static org.apache.hadoop.hive.metastore.txn.CompactionMetricsData.MetricType.NUM_DELTAS; -import static org.apache.hadoop.hive.metastore.txn.CompactionMetricsData.MetricType.NUM_OBSOLETE_DELTAS; -import static org.apache.hadoop.hive.metastore.txn.CompactionMetricsData.MetricType.NUM_SMALL_DELTAS; +import static org.apache.hadoop.hive.metastore.txn.entities.CompactionMetricsData.MetricType.NUM_DELTAS; +import static org.apache.hadoop.hive.metastore.txn.entities.CompactionMetricsData.MetricType.NUM_OBSOLETE_DELTAS; +import static org.apache.hadoop.hive.metastore.txn.entities.CompactionMetricsData.MetricType.NUM_SMALL_DELTAS; /** * diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/AcidMetricService.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/AcidMetricService.java index cc29af053f24..d80f84219eea 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/AcidMetricService.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/AcidMetricService.java @@ -32,8 +32,8 @@ import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.txn.CompactionMetricsData; -import org.apache.hadoop.hive.metastore.txn.MetricsInfo; +import 
org.apache.hadoop.hive.metastore.txn.entities.CompactionMetricsData; +import org.apache.hadoop.hive.metastore.txn.entities.MetricsInfo; import org.apache.hadoop.hive.metastore.txn.TxnStore; import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.thrift.TException; @@ -78,9 +78,9 @@ import static org.apache.hadoop.hive.metastore.metrics.MetricsConstants.OLDEST_OPEN_REPL_TXN_ID; import static org.apache.hadoop.hive.metastore.metrics.MetricsConstants.OLDEST_READY_FOR_CLEANING_AGE; import static org.apache.hadoop.hive.metastore.metrics.MetricsConstants.TABLES_WITH_X_ABORTED_TXNS; -import static org.apache.hadoop.hive.metastore.txn.CompactionMetricsData.MetricType.NUM_DELTAS; -import static org.apache.hadoop.hive.metastore.txn.CompactionMetricsData.MetricType.NUM_OBSOLETE_DELTAS; -import static org.apache.hadoop.hive.metastore.txn.CompactionMetricsData.MetricType.NUM_SMALL_DELTAS; +import static org.apache.hadoop.hive.metastore.txn.entities.CompactionMetricsData.MetricType.NUM_DELTAS; +import static org.apache.hadoop.hive.metastore.txn.entities.CompactionMetricsData.MetricType.NUM_OBSOLETE_DELTAS; +import static org.apache.hadoop.hive.metastore.txn.entities.CompactionMetricsData.MetricType.NUM_SMALL_DELTAS; /** * Collect and publish ACID and compaction related metrics. diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/PerfLogger.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/PerfLogger.java index aeede4ee8124..923e540f3a2c 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/PerfLogger.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/metrics/PerfLogger.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hive.metastore.metrics; import com.codahale.metrics.Timer; -import com.google.common.collect.ImmutableMap; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,23 +37,10 @@ public class PerfLogger { static final private Logger LOG = LoggerFactory.getLogger(PerfLogger.class.getName()); protected static final ThreadLocal perfLogger = new ThreadLocal<>(); - public static final String GET_AGGR_COL_STATS = "getAggrColStatsFor"; - public static final String GET_AGGR_COL_STATS_2 = "getAggrColStatsFor_2"; - public static final String LIST_PARTS_WITH_AUTH_INFO = "listPartitionsWithAuthInfo"; - public static final String LIST_PARTS_WITH_AUTH_INFO_2 = "listPartitionsWithAuthInfo_2"; - public static final String LIST_PARTS_BY_EXPR = "listPartitionsByExpr"; - public static final String LIST_PARTS_SPECS_BY_EXPR = "listPartitionsSpecByExpr"; public static final String GET_DATABASE = "getDatabase"; public static final String GET_TABLE = "getTable"; - public static final String GET_TABLE_2 = "getTable_2"; - public static final String GET_PK = "getPrimaryKeys"; - public static final String GET_FK = "getForeignKeys"; - public static final String GET_UNIQ_CONSTRAINTS = "getUniqueConstraints"; - public static final String GET_NOT_NULL_CONSTRAINTS = "getNotNullConstraints"; public static final String GET_TABLE_COL_STATS = "getTableColumnStatistics"; public static final String GET_TABLE_COL_STATS_2 = "getTableColumnStatistics_2"; - public static final String GET_CONFIG_VAL = "getConfigValue"; - private PerfLogger() { // Use getPerfLogger to get an instance of PerfLogger @@ -79,10 +65,6 @@ public static PerfLogger getPerfLogger(boolean resetPerfLogger) { return result; } - public static 
void setPerfLogger(PerfLogger resetPerfLogger) { - perfLogger.set(resetPerfLogger); - } - /** * Call this function when you start to measure time spent by a piece of code. * @param callerName the logging object to be used. @@ -138,49 +120,6 @@ public long perfLogEnd(String callerName, String method, String additionalInfo) return duration; } - public Long getStartTime(String method) { - long startTime = 0L; - - if (startTimes.containsKey(method)) { - startTime = startTimes.get(method); - } - return startTime; - } - - public Long getEndTime(String method) { - long endTime = 0L; - - if (endTimes.containsKey(method)) { - endTime = endTimes.get(method); - } - return endTime; - } - - public boolean startTimeHasMethod(String method) { - return startTimes.containsKey(method); - } - - public boolean endTimeHasMethod(String method) { - return endTimes.containsKey(method); - } - - public Long getDuration(String method) { - long duration = 0; - if (startTimes.containsKey(method) && endTimes.containsKey(method)) { - duration = endTimes.get(method) - startTimes.get(method); - } - return duration; - } - - - public ImmutableMap getStartTimes() { - return ImmutableMap.copyOf(startTimes); - } - - public ImmutableMap getEndTimes() { - return ImmutableMap.copyOf(endTimes); - } - // Methods for metrics integration. Each thread-local PerfLogger will open/close scope during each perf-log method. private transient Map timerContexts = new HashMap<>(); private transient Timer.Context totalApiCallsTimerContext = null; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java index f2f91cbedfb7..07acf2adcd69 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/parser/ExpressionTree.java @@ -34,6 +34,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; +import com.google.common.collect.Iterables; import com.google.common.collect.Sets; /** @@ -101,6 +102,14 @@ public static Operator fromString(String inputOperator) { " for " + Operator.class.getSimpleName()); } + public static boolean isEqualOperator(Operator op) { + return op == EQUALS; + } + + public static boolean isNotEqualOperator(Operator op) { + return op == NOTEQUALS || op == NOTEQUALS2; + } + @Override public String toString() { return op; @@ -219,34 +228,6 @@ protected void accept(TreeVisitor visitor) throws MetaException { visitor.visit(this); } - /** - * Generates a JDO filter statement - * @param params - * A map of parameter key to values for the filter statement. - * @param filterBuffer The filter builder that is used to build filter. 
- * @param partitionKeys - * @throws MetaException - */ - public void generateJDOFilter(Configuration conf, - Map params, FilterBuilder filterBuffer, List partitionKeys) throws MetaException { - if (filterBuffer.hasError()) return; - if (lhs != null) { - filterBuffer.append (" ("); - lhs.generateJDOFilter(conf, params, filterBuffer, partitionKeys); - - if (rhs != null) { - if( andOr == LogicalOperator.AND ) { - filterBuffer.append(" && "); - } else { - filterBuffer.append(" || "); - } - - rhs.generateJDOFilter(conf, params, filterBuffer, partitionKeys); - } - filterBuffer.append (") "); - } - } - @Override public String toString() { return "TreeNode{" + @@ -263,10 +244,11 @@ public String toString() { public static class LeafNode extends TreeNode { public String keyName; public Operator operator; - /** Constant expression side of the operator. Can currently be a String or a Long. */ + /** + * Constant expression side of the operator. Can currently be a String or a Long. + */ public Object value; public boolean isReverseOrder = false; - private static final String PARAM_PREFIX = "hive_filter_param_"; @Override protected void accept(TreeVisitor visitor) throws MetaException { @@ -274,8 +256,92 @@ protected void accept(TreeVisitor visitor) throws MetaException { } @Override - public void generateJDOFilter(Configuration conf, Map params, - FilterBuilder filterBuilder, List partitionKeys) throws MetaException { + public String toString() { + return "LeafNode{" + + "keyName='" + keyName + '\'' + + ", operator='" + operator + '\'' + + ", value=" + value + + (isReverseOrder ? ", isReverseOrder=true" : "") + + '}'; + } + + /** + * Get partition column index in the table partition column list that + * corresponds to the key that is being filtered on by this tree node. + * @param partitionKeys list of partition keys. + * @param filterBuilder filter builder used to report error, if any. + * @return The index. + */ + public static int getPartColIndexForFilter(String partitionKeyName, + List partitionKeys, FilterBuilder filterBuilder) throws MetaException { + int partitionColumnIndex = Iterables.indexOf(partitionKeys, key -> partitionKeyName.equalsIgnoreCase(key.getName())); + if( partitionColumnIndex < 0) { + filterBuilder.setError("Specified key <" + partitionKeyName + + "> is not a partitioning key for the table"); + return -1; + } + return partitionColumnIndex; + } + } + + /** + * Generate the JDOQL filter for the given expression tree + */ + public static class JDOFilterGenerator extends TreeVisitor { + + private static final String PARAM_PREFIX = "hive_filter_param_"; + + private Configuration conf; + private List partitionKeys; + // the filter builder to append to. + private FilterBuilder filterBuilder; + // the input map which is updated with the the parameterized values. 
+ // Keys are the parameter names and values are the parameter values + private Map params; + private boolean onParsing = false; + private String keyName; + private Object value; + private Operator operator; + private boolean isReverseOrder; + + public JDOFilterGenerator(Configuration conf, List partitionKeys, + FilterBuilder filterBuilder, Map params) { + this.conf = conf; + this.partitionKeys = partitionKeys; + this.filterBuilder = filterBuilder; + this.params = params; + } + + private void beforeParsing() throws MetaException { + if (!onParsing && !filterBuilder.getFilter().isEmpty()) { + filterBuilder.append(" && "); + } + onParsing = true; + } + + @Override + protected void beginTreeNode(TreeNode node) throws MetaException { + beforeParsing(); + filterBuilder.append("( "); + } + + @Override + protected void midTreeNode(TreeNode node) throws MetaException { + filterBuilder.append((node.getAndOr() == LogicalOperator.AND) ? " && " : " || "); + } + + @Override + protected void endTreeNode(TreeNode node) throws MetaException { + filterBuilder.append(") "); + } + + @Override + protected void visit(LeafNode node) throws MetaException { + beforeParsing(); + keyName = node.keyName; + operator = node.operator; + value = node.value; + isReverseOrder = node.isReverseOrder; if (partitionKeys != null) { generateJDOFilterOverPartitions(conf, params, filterBuilder, partitionKeys); } else { @@ -283,6 +349,11 @@ public void generateJDOFilter(Configuration conf, Map params, } } + @Override + protected boolean shouldStop() { + return filterBuilder.hasError(); + } + //can only support "=" and "!=" for now, because our JDO lib is buggy when // using objects from map.get() private static final Set TABLE_FILTER_OPS = Sets.newHashSet( @@ -360,7 +431,7 @@ private void generateJDOFilterGeneral(Map params, private void generateJDOFilterOverPartitions(Configuration conf, Map params, FilterBuilder filterBuilder, List partitionKeys) throws MetaException { int partitionColumnCount = partitionKeys.size(); - int partitionColumnIndex = getPartColIndexForFilter(partitionKeys, filterBuilder); + int partitionColumnIndex = LeafNode.getPartColIndexForFilter(keyName, partitionKeys, filterBuilder); if (filterBuilder.hasError()) return; boolean canPushDownIntegral = @@ -376,8 +447,8 @@ private void generateJDOFilterOverPartitions(Configuration conf, params.put(paramName, valueAsString); } - boolean isOpEquals = operator == Operator.EQUALS; - if (isOpEquals || operator == Operator.NOTEQUALS || operator == Operator.NOTEQUALS2) { + boolean isOpEquals = Operator.isEqualOperator(operator); + if (isOpEquals || Operator.isNotEqualOperator(operator)) { String partitionKey = partitionKeys.get(partitionColumnIndex).getName(); makeFilterForEquals(partitionKey, valueAsString, paramName, params, partitionColumnIndex, partitionColumnCount, isOpEquals, filterBuilder); @@ -434,32 +505,6 @@ public boolean canJdoUseStringsWithIntegral() { || (operator == Operator.NOTEQUALS2); } - /** - * Get partition column index in the table partition column list that - * corresponds to the key that is being filtered on by this tree node. - * @param partitionKeys list of partition keys. - * @param filterBuilder filter builder used to report error, if any. - * @return The index. 
- */ - public int getPartColIndexForFilter( - List partitionKeys, FilterBuilder filterBuilder) throws MetaException { - assert (partitionKeys.size() > 0); - int partitionColumnIndex; - for (partitionColumnIndex = 0; partitionColumnIndex < partitionKeys.size(); - ++partitionColumnIndex) { - if (partitionKeys.get(partitionColumnIndex).getName().equalsIgnoreCase(keyName)) { - break; - } - } - if( partitionColumnIndex == partitionKeys.size()) { - filterBuilder.setError("Specified key <" + keyName + - "> is not a partitioning key for the table"); - return -1; - } - - return partitionColumnIndex; - } - /** * Validates and gets the query parameter for JDO filter pushdown based on the column * and the constant stored in this node. @@ -499,16 +544,6 @@ private String getJdoFilterPushdownParam(int partColIndex, return isStringValue ? (String)val : Long.toString((Long)val); } - - @Override - public String toString() { - return "LeafNode{" + - "keyName='" + keyName + '\'' + - ", operator='" + operator + '\'' + - ", value=" + value + - (isReverseOrder ? ", isReverseOrder=true" : "") + - '}'; - } } public void accept(TreeVisitor treeVisitor) throws MetaException { @@ -618,21 +653,4 @@ public void addLeafNode(LeafNode newNode) { nodeStack.push(newNode); } - /** Generate the JDOQL filter for the given expression tree - * @param params the input map which is updated with the - * the parameterized values. Keys are the parameter names and values - * are the parameter values - * @param filterBuilder the filter builder to append to. - * @param partitionKeys - */ - public void generateJDOFilterFragment(Configuration conf, - Map params, FilterBuilder filterBuilder, List partitionKeys) throws MetaException { - if (root == null) { - return; - } - - filterBuilder.append(" && ( "); - root.generateJDOFilter(conf, params, filterBuilder, partitionKeys); - filterBuilder.append(" )"); - } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/parser/PartFilterVisitor.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/parser/PartFilterVisitor.java index 5d68d593c838..1ce0b27eabad 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/parser/PartFilterVisitor.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/parser/PartFilterVisitor.java @@ -17,8 +17,6 @@ */ package org.apache.hadoop.hive.metastore.parser; -import java.sql.Date; -import java.sql.Timestamp; import java.time.format.DateTimeParseException; import java.util.ArrayList; import java.util.List; @@ -231,25 +229,27 @@ public Long visitIntegerLiteral(PartitionFilterParser.IntegerLiteralContext ctx) } @Override - public Date visitDateLiteral(PartitionFilterParser.DateLiteralContext ctx) { + public String visitDateLiteral(PartitionFilterParser.DateLiteralContext ctx) { PartitionFilterParser.DateContext date = ctx.date(); String dateValue = unquoteString(date.value.getText()); try { - return MetaStoreUtils.convertStringToDate(dateValue); + MetaStoreUtils.convertStringToDate(dateValue); } catch (DateTimeParseException e) { throw new ParseCancellationException(e.getMessage()); } + return dateValue; } @Override - public Timestamp visitTimestampLiteral(PartitionFilterParser.TimestampLiteralContext ctx) { + public String visitTimestampLiteral(PartitionFilterParser.TimestampLiteralContext ctx) { PartitionFilterParser.TimestampContext timestamp = ctx.timestamp(); String timestampValue = 
unquoteString(timestamp.value.getText()); try { - return MetaStoreUtils.convertStringToTimestamp(timestampValue); + MetaStoreUtils.convertStringToTimestamp(timestampValue); } catch (DateTimeParseException e) { throw new ParseCancellationException(e.getMessage()); } + return timestampValue; } @Override diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java index b793345dd945..2eee929bdeee 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/SQLGenerator.java @@ -21,6 +21,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.DatabaseProduct; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -181,6 +183,55 @@ public PreparedStatement prepareStmtWithParameters(Connection dbConn, String sql return pst; } + + + /** + * Oracle SQL query that creates or replaces view HMS_SUMMARY. + */ + private static final String CREATE_METADATASUMMARY_ORACLE = "CREATE OR REPLACE VIEW METADATASUMMARYALL AS SELECT a.TBL_ID, a.TBL_NAME, a.OWNER as \"CTLG\", a.TBL_TYPE, a.CREATE_TIME, a.DB_ID, a.SD_ID, b.NAME, c.INPUT_FORMAT, c.IS_COMPRESSED, c.LOCATION, c.OUTPUT_FORMAT,c.SERDE_ID, d.SLIB, TO_CHAR(e.PARAM_VALUE) as \"PARAM_VAL\", count(j.COLUMN_NAME) as \"TOTAL_COLUMN_COUNT\", jj.ARRAY_COLUMN_COUNT, jj.STRUCT_COLUMN_COUNT, jj.MAP_COLUMN_COUNT, k.PARTITION_KEY_NAME as \"PARTITION_COLUMN\", m.PARTITION_CNT, CAST(CAST(q.NUM_FILES AS VARCHAR2(200)) AS NUMBER) as \"num_files\", CAST(q.TOTAL_SIZE AS NUMBER) as \"total_size\", CAST(q.NUM_ROWS AS NUMBER) as \"num_rows\", q.WRITE_FORMAT_DEFAULT, q.TRANSACTIONAL_PROPERTIES FROM TBLS a LEFT JOIN DBS b on a.DB_ID = b.DB_ID LEFT JOIN SDS c on a.SD_ID = c.SD_ID LEFT JOIN SERDES d on c.SERDE_ID = d.SERDE_ID LEFT JOIN (select SERDE_ID,PARAM_KEY,PARAM_VALUE from SERDE_PARAMS where PARAM_KEY = 'field.delim') e on c.SERDE_ID = e.SERDE_ID LEFT JOIN COLUMNS_V2 j on c.CD_ID = j.CD_ID LEFT JOIN (SELECT CD_ID, sum(CASE WHEN TYPE_NAME like 'array%' THEN 1 ELSE 0 END) AS \"ARRAY_COLUMN_COUNT\", sum(CASE WHEN TYPE_NAME like 'struct%' THEN 1 ELSE 0 END) AS \"STRUCT_COLUMN_COUNT\", sum(CASE WHEN TYPE_NAME like 'map%' THEN 1 ELSE 0 END) AS \"MAP_COLUMN_COUNT\" from COLUMNS_V2 group by CD_ID) jj on jj.CD_ID=c.CD_ID LEFT JOIN (select TBL_ID, LISTAGG(PKEY_NAME, ',') as PARTITION_KEY_NAME from PARTITION_KEYS group by TBL_ID) k on a.TBL_ID = k.TBL_ID LEFT JOIN (select SERDE_ID,PARAM_KEY,PARAM_VALUE from SERDE_PARAMS where PARAM_KEY = 'serialization.format') f on c.SERDE_ID = f.SERDE_ID LEFT JOIN (select TBL_ID,PARAM_KEY,PARAM_VALUE from TABLE_PARAMS where PARAM_KEY = 'comment') g on a.TBL_ID = g.TBL_ID LEFT JOIN (select TBL_ID, PARAM_KEY,PARAM_VALUE from TABLE_PARAMS where PARAM_KEY = 'transient_lastDdlTime') h on a.TBL_ID = h.TBL_ID LEFT JOIN (select TBL_ID,COUNT(PART_ID) as PARTITION_CNT from PARTITIONS group by TBL_ID) m on a.TBL_ID = m.TBL_ID LEFT JOIN (SELECT aa.TBL_ID, aa.NUM_FILES + case when bb.NUM_FILES is not null then bb.NUM_FILES else 0 end AS \"NUM_FILES\", aa.NUM_ROWS + case when bb.NUM_ROWS is not null then 
bb.NUM_ROWS else 0 end AS \"NUM_ROWS\", aa.TOTAL_SIZE + case when bb.TOTAL_SIZE is not null then bb.TOTAL_SIZE else 0 end AS \"TOTAL_SIZE\", aa.WRITE_FORMAT_DEFAULT, aa.TRANSACTIONAL_PROPERTIES from (select u.TBL_ID, NUM_FILES, NUM_ROWS, TOTAL_SIZE, WRITE_FORMAT_DEFAULT, TRANSACTIONAL_PROPERTIES from (select TBL_ID, max(CASE PARAM_KEY WHEN 'numFiles' THEN CAST(CAST(PARAM_VALUE AS VARCHAR2(200)) AS NUMBER) ELSE 0 END) AS \"NUM_FILES\", max(CASE PARAM_KEY WHEN 'numRows' THEN CAST(CAST(PARAM_VALUE AS VARCHAR2(200)) AS NUMBER) ELSE 0 END) AS \"NUM_ROWS\", max(CASE PARAM_KEY WHEN 'totalSize' THEN CAST(CAST(PARAM_VALUE AS VARCHAR2(200)) AS NUMBER) ELSE 0 END) AS \"TOTAL_SIZE\" from TABLE_PARAMS group by TBL_ID) u left join (select TBL_ID, CAST(PARAM_VALUE AS VARCHAR2(200)) as \"WRITE_FORMAT_DEFAULT\" from TABLE_PARAMS where PARAM_KEY = 'write.format.default') v on u.TBL_ID = v.TBL_ID left join (select TBL_ID, CAST(PARAM_VALUE AS VARCHAR2(200)) as \"TRANSACTIONAL_PROPERTIES\" from TABLE_PARAMS where PARAM_KEY = 'transactional_properties') w on u.TBL_ID = w.TBL_ID) aa left join (SELECT y.TBL_ID, SUM(x.NUM_FILES) AS \"NUM_FILES\", SUM(x.NUM_ROWS) AS \"NUM_ROWS\", SUM(x.TOTAL_SIZE) AS \"TOTAL_SIZE\" FROM PARTITIONS y left join (SELECT PART_ID, max(CASE PARAM_KEY WHEN 'numFiles' THEN CAST(CAST(PARAM_VALUE AS VARCHAR2(200)) AS NUMBER) ELSE 0 END) AS \"NUM_FILES\", max(CASE PARAM_KEY WHEN 'numRows' THEN CAST(CAST(PARAM_VALUE AS CHAR(200)) AS NUMBER) ELSE 0 END) AS \"NUM_ROWS\", max(CASE PARAM_KEY WHEN 'totalSize' THEN CAST(CAST(PARAM_VALUE AS VARCHAR2(200)) AS NUMBER) ELSE 0 END) AS \"TOTAL_SIZE\" FROM PARTITION_PARAMS group by PART_ID) x ON y.PART_ID = x.PART_ID group by y.TBL_ID) bb on aa.TBL_ID = bb.TBL_ID) q on a.TBL_ID = q.TBL_ID group by a.TBL_ID, a.TBL_NAME, a.OWNER, a.TBL_TYPE, a.CREATE_TIME, a.DB_ID, a.SD_ID, b.NAME, c.INPUT_FORMAT, c.IS_COMPRESSED, c.LOCATION, c.OUTPUT_FORMAT,c.SERDE_ID, d.SLIB, TO_CHAR(e.PARAM_VALUE), jj.ARRAY_COLUMN_COUNT, jj.STRUCT_COLUMN_COUNT,jj.MAP_COLUMN_COUNT, k.PARTITION_KEY_NAME, m.PARTITION_CNT,q.NUM_FILES, q.TOTAL_SIZE, q.NUM_ROWS, q.WRITE_FORMAT_DEFAULT, q.TRANSACTIONAL_PROPERTIES;"; + /** + * MySQL query that creates or replaces view HMS_SUMMARY. 
+ */ + private static final String CREATE_METADATASUMMARY_MYSQL = "CREATE OR REPLACE VIEW METADATASUMMARYALL AS SELECT a.TBL_ID, a.TBL_NAME, a.OWNER as \"CTLG\", a.TBL_TYPE, a.CREATE_TIME, a.DB_ID, a.SD_ID, b.NAME, c.INPUT_FORMAT, c.IS_COMPRESSED, c.LOCATION, c.OUTPUT_FORMAT,c.SERDE_ID, d.SLIB, e.PARAM_VALUE, count(j.COLUMN_NAME) as \"TOTAL_COLUMN_COUNT\", jj.ARRAY_COLUMN_COUNT as \"ARRAY_COLUMN_COUNT\", jj.STRUCT_COLUMN_COUNT as \"STRUCT_COLUMN_COUNT\", jj.MAP_COLUMN_COUNT as \"MAP_COLUMN_COUNT\", k.PARTITION_KEY_NAME as \"PARTITION_COLUMN\", m.PARTITION_CNT, CAST(CAST(q.NUM_FILES AS CHAR(200)) AS SIGNED) as NUM_FILES, CAST(q.TOTAL_SIZE AS SIGNED) as TOTAL_SIZE, CAST(q.NUM_ROWS AS SIGNED) as NUM_ROWS, q.WRITE_FORMAT_DEFAULT, q.TRANSACTIONAL_PROPERTIES FROM TBLS a left JOIN DBS b on a.DB_ID = b.DB_ID left JOIN SDS c on a.SD_ID = c.SD_ID LEFT JOIN SERDES d on c.SERDE_ID = d.SERDE_ID left JOIN (select SERDE_ID,PARAM_KEY,PARAM_VALUE from SERDE_PARAMS where PARAM_KEY = 'field.delim') e on c.SERDE_ID = e.SERDE_ID left join COLUMNS_V2 j on c.CD_ID = j.CD_ID LEFT JOIN (SELECT CD_ID, sum(CASE WHEN TYPE_NAME like 'array%' THEN 1 ELSE 0 END) AS \"ARRAY_COLUMN_COUNT\", sum(CASE WHEN TYPE_NAME like 'struct%' THEN 1 ELSE 0 END) AS \"STRUCT_COLUMN_COUNT\", sum(CASE WHEN TYPE_NAME like 'map%' THEN 1 ELSE 0 END) AS \"MAP_COLUMN_COUNT\" from COLUMNS_V2 group by CD_ID) jj on jj.CD_ID=c.CD_ID left JOIN(select TBL_ID, GROUP_CONCAT(PKEY_NAME, ',') as PARTITION_KEY_NAME from PARTITION_KEYS group by TBL_ID) k on a.TBL_ID = k.TBL_ID left JOIN (select SERDE_ID,PARAM_KEY,PARAM_VALUE from SERDE_PARAMS where PARAM_KEY = 'serialization.format') f on c.SERDE_ID = f.SERDE_ID left join (select TBL_ID,PARAM_KEY,PARAM_VALUE from TABLE_PARAMS where PARAM_KEY = 'comment') g on a.TBL_ID = g.TBL_ID left JOIN (select TBL_ID, PARAM_KEY,PARAM_VALUE from TABLE_PARAMS where PARAM_KEY = 'transient_lastDdlTime') h on a.TBL_ID = h.TBL_ID left join (select TBL_ID,COUNT(PART_ID) as PARTITION_CNT from PARTITIONS group by TBL_ID) m on a.TBL_ID = m.TBL_ID Left join (SELECT aa.TBL_ID, aa.NUM_FILES + case when bb.NUM_FILES is not null then bb.NUM_FILES else 0 end AS \"NUM_FILES\", aa.NUM_ROWS + case when bb.NUM_ROWS is not null then bb.NUM_ROWS else 0 end AS \"NUM_ROWS\", aa.TOTAL_SIZE + case when bb.TOTAL_SIZE is not null then bb.TOTAL_SIZE else 0 end AS \"TOTAL_SIZE\", aa.WRITE_FORMAT_DEFAULT, aa.TRANSACTIONAL_PROPERTIES from (select u.TBL_ID, NUM_FILES, NUM_ROWS, TOTAL_SIZE, WRITE_FORMAT_DEFAULT, TRANSACTIONAL_PROPERTIES from (select TBL_ID, max(CASE PARAM_KEY WHEN 'numFiles' THEN CAST(CAST(PARAM_VALUE AS CHAR(200)) AS SIGNED) ELSE 0 END) AS \"NUM_FILES\", max(CASE PARAM_KEY WHEN 'numRows' THEN CAST(CAST(PARAM_VALUE AS CHAR(200)) AS SIGNED) ELSE 0 END) AS \"NUM_ROWS\", max(CASE PARAM_KEY WHEN 'totalSize' THEN CAST(CAST(PARAM_VALUE AS CHAR(200)) AS SIGNED) ELSE 0 END) AS \"TOTAL_SIZE\" from TABLE_PARAMS group by TBL_ID) u left join (select TBL_ID, CAST(PARAM_VALUE AS CHAR(200)) as \"WRITE_FORMAT_DEFAULT\" from TABLE_PARAMS where PARAM_KEY = 'write.format.default') v on u.TBL_ID = v.TBL_ID left join (select TBL_ID, CAST(PARAM_VALUE AS CHAR(200)) as \"TRANSACTIONAL_PROPERTIES\" from TABLE_PARAMS where PARAM_KEY = 'transactional_properties') w on u.TBL_ID = w.TBL_ID) aa left join (SELECT y.TBL_ID, SUM(x.NUM_FILES) AS \"NUM_FILES\", SUM(x.NUM_ROWS) AS \"NUM_ROWS\", SUM(x.TOTAL_SIZE) AS \"TOTAL_SIZE\" FROM PARTITIONS y left JOIN (SELECT PART_ID, max(CASE PARAM_KEY WHEN 'numFiles' THEN CAST(CAST(PARAM_VALUE AS CHAR(200)) AS SIGNED) ELSE 0 END) 
AS \"NUM_FILES\", max(CASE PARAM_KEY WHEN 'numRows' THEN CAST(CAST(PARAM_VALUE AS CHAR(200)) AS SIGNED) ELSE 0 END) AS \"NUM_ROWS\", max(CASE PARAM_KEY WHEN 'totalSize' THEN CAST(CAST(PARAM_VALUE AS CHAR(200)) AS SIGNED) ELSE 0 END) AS \"TOTAL_SIZE\" FROM PARTITION_PARAMS group by PART_ID)x ON y.PART_ID=x.PART_ID group by y.TBL_ID) bb on aa.TBL_ID = bb.TBL_ID) q on a.TBL_ID = q.TBL_ID group by a.TBL_ID, a.TBL_NAME, a.OWNER, a.TBL_TYPE, a.CREATE_TIME, a.DB_ID, a.SD_ID, b.NAME, c.INPUT_FORMAT, c.IS_COMPRESSED, c.LOCATION, c.OUTPUT_FORMAT,c.SERDE_ID, d.SLIB, e.PARAM_VALUE, jj.ARRAY_COLUMN_COUNT, jj.STRUCT_COLUMN_COUNT,jj.MAP_COLUMN_COUNT, k.PARTITION_KEY_NAME, m.PARTITION_CNT,q.NUM_FILES, q.TOTAL_SIZE, q.NUM_ROWS, q.WRITE_FORMAT_DEFAULT, q.TRANSACTIONAL_PROPERTIES;"; + + /** + * Postgres SQL query that creates or replaces view HMS_SUMMARY. + */ + private static final String CREATE_METADATASUMMARY_POSTGRES = "CREATE OR REPLACE VIEW \"METADATASUMMARYALL\" AS SELECT a.\"TBL_ID\", a.\"TBL_NAME\", a.\"OWNER\" as \"CTLG\", a.\"TBL_TYPE\", a.\"CREATE_TIME\", a.\"DB_ID\", a.\"SD_ID\", b.\"NAME\", c.\"INPUT_FORMAT\", c.\"IS_COMPRESSED\", c.\"LOCATION\", c.\"OUTPUT_FORMAT\",c.\"SERDE_ID\", d.\"SLIB\", e.\"PARAM_VALUE\", count(j.\"COLUMN_NAME\") as \"TOTAL_COLUMN_COUNT\", jj.\"ARRAY_COLUMN_COUNT\", jj.\"STRUCT_COLUMN_COUNT\", jj.\"MAP_COLUMN_COUNT\", k.\"PARTITION_KEY_NAME\" as \"PARTITION_COLUMN\", m.\"PARTITION_CNT\", CAST(CAST(q.\"NUM_FILES\" AS CHAR(200)) AS BIGINT), CAST(q.\"TOTAL_SIZE\" AS BIGINT), CAST(q.\"NUM_ROWS\" AS BIGINT), q.\"WRITE_FORMAT_DEFAULT\", q.\"TRANSACTIONAL_PROPERTIES\" FROM \"TBLS\" a LEFT JOIN \"DBS\" b on a.\"DB_ID\" = b.\"DB_ID\" LEFT JOIN \"SDS\" c on a.\"SD_ID\" = c.\"SD_ID\" LEFT JOIN \"SERDES\" d on c.\"SERDE_ID\" = d.\"SERDE_ID\" LEFT JOIN (select \"SERDE_ID\", \"PARAM_KEY\", \"PARAM_VALUE\" from \"SERDE_PARAMS\" where \"PARAM_KEY\" = 'field.delim') e on c.\"SERDE_ID\" = e.\"SERDE_ID\" LEFT JOIN \"COLUMNS_V2\" j on c.\"CD_ID\" = j.\"CD_ID\" LEFT JOIN (SELECT \"CD_ID\", sum(CASE WHEN \"TYPE_NAME\" like 'array%' THEN 1 ELSE 0 END) AS \"ARRAY_COLUMN_COUNT\", sum(CASE WHEN \"TYPE_NAME\" like 'struct%' THEN 1 ELSE 0 END) AS \"STRUCT_COLUMN_COUNT\", sum(CASE WHEN \"TYPE_NAME\" like 'map%' THEN 1 ELSE 0 END) AS \"MAP_COLUMN_COUNT\" from \"COLUMNS_V2\" group by \"CD_ID\") jj on jj.\"CD_ID\" = c.\"CD_ID\" LEFT JOIN (select \"TBL_ID\", string_agg(\"PKEY_NAME\", ',') as \"PARTITION_KEY_NAME\" from \"PARTITION_KEYS\" group by \"TBL_ID\") k on a.\"TBL_ID\" = k.\"TBL_ID\" LEFT JOIN (select \"SERDE_ID\", \"PARAM_KEY\", \"PARAM_VALUE\" from \"SERDE_PARAMS\" where \"PARAM_KEY\" = 'serialization.format') f on c.\"SERDE_ID\" = f.\"SERDE_ID\" LEFT JOIN (select \"TBL_ID\",\"PARAM_KEY\",\"PARAM_VALUE\" from \"TABLE_PARAMS\" where \"PARAM_KEY\" = 'comment') g on a.\"TBL_ID\" = g.\"TBL_ID\" LEFT JOIN (select \"TBL_ID\", \"PARAM_KEY\",\"PARAM_VALUE\" from \"TABLE_PARAMS\" where \"PARAM_KEY\" = 'transient_lastDdlTime') h on a.\"TBL_ID\" = h.\"TBL_ID\" LEFT JOIN (select \"TBL_ID\",COUNT(\"PART_ID\") as \"PARTITION_CNT\" from \"PARTITIONS\" group by \"TBL_ID\") m on a.\"TBL_ID\" = m.\"TBL_ID\" LEFT JOIN (SELECT aa.\"TBL_ID\", aa.\"NUM_FILES\" + case when bb.\"NUM_FILES\" is not null then bb.\"NUM_FILES\" else 0 end AS \"NUM_FILES\", aa.\"NUM_ROWS\" + case when bb.\"NUM_ROWS\" is not null then bb.\"NUM_ROWS\" else 0 end AS \"NUM_ROWS\", aa.\"TOTAL_SIZE\" + case when bb.\"TOTAL_SIZE\" is not null then bb.\"TOTAL_SIZE\" else 0 end AS \"TOTAL_SIZE\", aa.\"WRITE_FORMAT_DEFAULT\", 
aa.\"TRANSACTIONAL_PROPERTIES\" from (select u.\"TBL_ID\", \"NUM_FILES\", \"NUM_ROWS\", \"TOTAL_SIZE\", \"WRITE_FORMAT_DEFAULT\", \"TRANSACTIONAL_PROPERTIES\" from (select \"TBL_ID\", max(CASE \"PARAM_KEY\" WHEN 'numFiles' THEN CAST(CAST(\"PARAM_VALUE\" AS CHAR(200)) AS BIGINT) ELSE 0 END) AS \"NUM_FILES\", max(CASE \"PARAM_KEY\" WHEN 'numRows' THEN CAST(CAST(\"PARAM_VALUE\" AS CHAR(200)) AS BIGINT) ELSE 0 END) AS \"NUM_ROWS\", max(CASE \"PARAM_KEY\" WHEN 'totalSize' THEN CAST(CAST(\"PARAM_VALUE\" AS CHAR(200)) AS BIGINT) ELSE 0 END) AS \"TOTAL_SIZE\" from \"TABLE_PARAMS\" group by \"TBL_ID\") u left join (select \"TBL_ID\", CAST(\"PARAM_VALUE\" AS CHAR(200)) as \"WRITE_FORMAT_DEFAULT\" from \"TABLE_PARAMS\" where \"PARAM_KEY\" = 'write.format.default') v on u.\"TBL_ID\" = v.\"TBL_ID\" left join (select \"TBL_ID\", CAST(\"PARAM_VALUE\" AS CHAR(200)) as \"TRANSACTIONAL_PROPERTIES\" from \"TABLE_PARAMS\" where \"PARAM_KEY\" = 'transactional_properties') w on u.\"TBL_ID\" = w.\"TBL_ID\") aa left join (SELECT y.\"TBL_ID\", SUM(x.\"NUM_FILES\") AS \"NUM_FILES\", SUM(x.\"NUM_ROWS\") AS \"NUM_ROWS\", SUM(x.\"TOTAL_SIZE\") AS \"TOTAL_SIZE\" FROM \"PARTITIONS\" y left join (SELECT \"PART_ID\", max(CASE \"PARAM_KEY\" WHEN 'numFiles' THEN CAST(CAST(\"PARAM_VALUE\" AS CHAR(200)) AS BIGINT) ELSE 0 END) AS \"NUM_FILES\", max(CASE \"PARAM_KEY\" WHEN 'numRows' THEN CAST(CAST(\"PARAM_VALUE\" AS CHAR(200)) AS BIGINT) ELSE 0 END) AS \"NUM_ROWS\", max(CASE \"PARAM_KEY\" WHEN 'totalSize' THEN CAST(CAST(\"PARAM_VALUE\" AS CHAR(200)) AS BIGINT) ELSE 0 END) AS \"TOTAL_SIZE\" FROM \"PARTITION_PARAMS\" group by \"PART_ID\") x ON y.\"PART_ID\" = x.\"PART_ID\" group by y.\"TBL_ID\") bb on aa.\"TBL_ID\" = bb.\"TBL_ID\") q on a.\"TBL_ID\" = q.\"TBL_ID\" group by a.\"TBL_ID\", a.\"TBL_NAME\", a.\"OWNER\", a.\"TBL_TYPE\", a.\"CREATE_TIME\", a.\"DB_ID\", a.\"SD_ID\", b.\"NAME\", c.\"INPUT_FORMAT\", c.\"IS_COMPRESSED\", c.\"LOCATION\", c.\"OUTPUT_FORMAT\",c.\"SERDE_ID\", d.\"SLIB\", e.\"PARAM_VALUE\", jj.\"ARRAY_COLUMN_COUNT\", jj.\"STRUCT_COLUMN_COUNT\",jj.\"MAP_COLUMN_COUNT\", k.\"PARTITION_KEY_NAME\", m.\"PARTITION_CNT\",q.\"NUM_FILES\", q.\"TOTAL_SIZE\", q.\"NUM_ROWS\", q.\"WRITE_FORMAT_DEFAULT\", q.\"TRANSACTIONAL_PROPERTIES\";"; + + /** + * Create or replace a view that stores all the info regarding metastore summary. 
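A note on how these view definitions are meant to be consumed: the DDL returned by getCreateQueriesForMetastoreSummary() is executed against the backing RDBMS (Derby and SQL Server get no view and the method returns null), and getSelectQueryForMetastoreSummary() then reads one summary row per table. The helper below is only an illustrative sketch and is not part of this patch; the SQLGenerator instance and the JDBC connection are assumed to exist.

    // Illustrative sketch: sqlGenerator is an initialized SQLGenerator, conn is an open
    // JDBC connection to the metastore backing database (both assumed here).
    static void dumpMetastoreSummary(SQLGenerator sqlGenerator, java.sql.Connection conn) throws Exception {
      java.util.List<String> ddl = sqlGenerator.getCreateQueriesForMetastoreSummary();
      if (ddl == null) {
        return;                                  // Derby / SQL Server: no summary view available
      }
      try (java.sql.Statement stmt = conn.createStatement()) {
        for (String q : ddl) {
          stmt.execute(q);                       // CREATE OR REPLACE VIEW METADATASUMMARYALL ...
        }
        try (java.sql.ResultSet rs = stmt.executeQuery(sqlGenerator.getSelectQueryForMetastoreSummary())) {
          while (rs.next()) {
            // one row per table: column/partition counts, row/file counts, sizes, formats, ...
          }
        }
      }
    }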
+ * @return + */ + public List getCreateQueriesForMetastoreSummary() { + List queries = new ArrayList(); + switch (dbProduct.dbType) { + case MYSQL: + queries.add(CREATE_METADATASUMMARY_MYSQL); + break; + case DERBY: + return null; + case SQLSERVER: + return null; + case ORACLE: + queries.add(CREATE_METADATASUMMARY_ORACLE); + break; + case POSTGRES: + queries.add(CREATE_METADATASUMMARY_POSTGRES); + break; + } + return queries; + } + + public String getSelectQueryForMetastoreSummary() { + if (dbProduct.dbType == DatabaseProduct.dbType.DERBY || dbProduct.dbType == DatabaseProduct.dbType.MYSQL || dbProduct.dbType == DatabaseProduct.dbType.ORACLE) { + return "select * from METADATASUMMARYALL"; + } else if (dbProduct.dbType == DatabaseProduct.dbType.POSTGRES){ + return "select * from \"METADATASUMMARYALL\""; + } + return null; + } + public DatabaseProduct getDbProduct() { return dbProduct; } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/HiveMetaTool.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/HiveMetaTool.java index 913146e07bc8..1d2918c6f7dc 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/HiveMetaTool.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/HiveMetaTool.java @@ -28,6 +28,7 @@ * - list the file system root * - execute JDOQL against the metastore using DataNucleus * - perform HA name node upgrade + * - summarize the data in HMS */ public final class HiveMetaTool { private static final Logger LOGGER = LoggerFactory.getLogger(HiveMetaTool.class.getName()); @@ -36,7 +37,7 @@ private HiveMetaTool() { throw new UnsupportedOperationException("HiveMetaTool should not be instantiated"); } - public static void main(String[] args) { + public static void execute(String[] args) throws Exception { HiveMetaToolCommandLine cl = HiveMetaToolCommandLine.parseArguments(args); ObjectStore objectStore = new ObjectStore(); @@ -54,6 +55,8 @@ public static void main(String[] args) { task = new MetaToolTaskListExtTblLocs(); } else if (cl.isDiffExtTblLocs()) { task = new MetaToolTaskDiffExtTblLocs(); + } else if (cl.isMetadataSummary()) { + task = new MetaToolTaskMetadataSummary(); } else { throw new IllegalArgumentException("No task was specified!"); } @@ -61,10 +64,20 @@ public static void main(String[] args) { task.setObjectStore(objectStore); task.setCommandLine(cl); task.execute(); + } finally { + objectStore.shutdown(); + } + } + + public static void main(String[] args) { + int status = 0; + try { + execute(args); } catch (Exception e) { + status = -1; LOGGER.error("Exception occured", e); } finally { - objectStore.shutdown(); + System.exit(status); } } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/HiveMetaToolCommandLine.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/HiveMetaToolCommandLine.java index ce43a8c4f57a..612af03d6e02 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/HiveMetaToolCommandLine.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/HiveMetaToolCommandLine.java @@ -36,6 +36,13 @@ class HiveMetaToolCommandLine { private static final Logger LOGGER = 
LoggerFactory.getLogger(HiveMetaToolCommandLine.class.getName()); @SuppressWarnings("static-access") + private static final Option METADATA_SUMMARY = OptionBuilder + .withArgName("output-format> " + " catalogClass; + /** The Catalog constructor. */ + private final Constructor constructor; + /** The Catalog.setConf() method. */ + private final Method setConf; + /** The Catalog.initialize() method. */ + private final Method initialize; + /** The Catalog.listNamespaces() method. */ + private final Method listNamespaces; + /** The Namespace.listTables() method. */ + private volatile Method listTablesMethod; + /** The Catalog.loadTable() method. */ + private volatile Method loadTableMethod; + /** The Table.operations() method. */ + private volatile Method operationsMethod; + /** The *.current() method. */ + private volatile Method currentMethod; + private volatile Method schemaMethod; + private volatile Method columnsMethod; + private volatile Method specMethod; + private volatile Method fieldsMethod; + private volatile Method currentSnapshotMethod; + /** The Snapshot.summary() .*/ + private volatile Method summaryMethod; + + /** + * Creates the catalog reflector. + * + * @throws ClassNotFoundException + * @throws InstantiationException + * @throws IllegalAccessException + */ + IcebergReflector() throws ClassNotFoundException, InstantiationException, IllegalAccessException, NoSuchMethodException { + catalogClass = Class.forName(CATALOG_CLASS); + // constructor + this.constructor = catalogClass.getConstructor(); + this.setConf = catalogClass.getMethod("setConf", org.apache.hadoop.conf.Configuration.class); + this.initialize = catalogClass.getMethod("initialize", String.class, Map.class); + this.listNamespaces = catalogClass.getMethod("listNamespaces"); + } + + CatalogHandle newCatalog() { + try { + Object catalog = constructor.newInstance(); + return new CatalogHandle(catalog); + } catch (Throwable e) { + // ignore + } + return null; + } + + /** + * A catalog instance. + */ + class CatalogHandle { + private final Object catalog; + + CatalogHandle(Object catalog) { + this.catalog = catalog; + } + + void setConf(Configuration conf) throws InvocationTargetException, IllegalAccessException { + setConf.invoke(catalog, conf); + } + + void initialize(String name, Map properties) throws InvocationTargetException, IllegalAccessException { + initialize.invoke(catalog, name, properties); + } + + Collection listNamespaces() throws InvocationTargetException, IllegalAccessException { + return (Collection) listNamespaces.invoke(catalog); + } + + Collection listTables(Object namespace) throws InvocationTargetException, IllegalAccessException, NoSuchMethodException { + if (listTablesMethod == null) { + Class namespaceClazz = namespace.getClass(); + listTablesMethod = catalogClass.getMethod("listTables", namespaceClazz); + } + return (Collection) listTablesMethod.invoke(namespace); + } + + Object loadTable(Object tableIdentifier) throws InvocationTargetException, IllegalAccessException, NoSuchMethodException { + if (loadTableMethod == null) { + Class identifierClazz = tableIdentifier.getClass(); + loadTableMethod = catalogClass.getMethod("loadTable", identifierClazz); + } + return loadTableMethod.invoke(tableIdentifier); + } + + /** + * Creates the metadata summary for a given Iceberg table. 
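A side note on the reflective dispatch used by this handle: each Catalog method is looked up lazily from the runtime class of its argument, cached in one of the volatile Method fields above, and then invoked through Method.invoke, which takes the catalog instance as the receiver and the namespace (or table identifier) as the argument. A minimal sketch, with names mirroring the fields above and not part of the patch:

    Collection<?> listTablesOf(Object namespace) throws ReflectiveOperationException {
      if (listTablesMethod == null) {
        // Catalog.listTables(Namespace): resolve once from the runtime Namespace class, then cache
        listTablesMethod = catalogClass.getMethod("listTables", namespace.getClass());
      }
      // the wrapped catalog object is the receiver, the namespace is the sole argument
      return (Collection<?>) listTablesMethod.invoke(catalog, namespace);
    }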
+ * @param table the table instance + * @return a summary instance + * @throws NoSuchMethodException + * @throws InvocationTargetException + * @throws IllegalAccessException + */ + MetadataTableSummary collectMetadata(Object table) throws NoSuchMethodException, InvocationTargetException, IllegalAccessException { + Method nameMethod = table.getClass().getMethod("name"); + String tableFullName = (String) nameMethod.invoke(table); + String[] paths = tableFullName.split("\\."); + String catalogName = paths[0]; + String databaseName = paths[1]; + String tableName = paths[2]; + if (operationsMethod == null) { + operationsMethod = table.getClass().getMethod("operations"); + } + Object operations = operationsMethod.invoke(table); + if (currentMethod == null) { + currentMethod = operations.getClass().getMethod("current"); + } + Object meta = currentMethod.invoke(operations); + if (schemaMethod == null) { + schemaMethod = meta.getClass().getMethod("schema"); + } + Object schema = schemaMethod.invoke(meta); + if (columnsMethod == null) { + columnsMethod = schema.getClass().getMethod("columns"); + } + Collection columns = (Collection) columnsMethod.invoke(schema); + if (specMethod == null) { + specMethod = meta.getClass().getMethod("spec"); + } + Object spec = specMethod.invoke(meta); + if (fieldsMethod == null) { + fieldsMethod = spec.getClass().getMethod("fields"); + } + Collection fields = (Collection) fieldsMethod.invoke(spec); + int columnCount= columns.size(); + int partitionColumnCount = fields.size(); + + if (currentSnapshotMethod == null) { + currentSnapshotMethod = meta.getClass().getMethod("currentSnapshot"); + } + Object snapshot = currentSnapshotMethod.invoke(meta); + MetadataTableSummary metadataTableSummary = new MetadataTableSummary(); + // sometimes current snapshot could be null + if (snapshot != null) { + if (summaryMethod == null) { + summaryMethod = snapshot.getClass().getDeclaredMethod("summary"); + } + Map summaryMap = (Map) summaryMethod.invoke(snapshot); + BigInteger totalSizeBytes = new BigInteger(summaryMap.get("total-files-size")); + BigInteger totalRowsCount = new BigInteger(summaryMap.get("total-records")); + BigInteger totalFilesCount = new BigInteger(summaryMap.get("total-data-files")); + metadataTableSummary.setTotalSize(totalSizeBytes); + metadataTableSummary.setSizeNumRows(totalRowsCount); + metadataTableSummary.setSizeNumFiles(totalFilesCount); + } + metadataTableSummary.setCtlgName(catalogName); + metadataTableSummary.setDbName(databaseName); + metadataTableSummary.setTblName(tableName); + metadataTableSummary.setColCount(columnCount); + metadataTableSummary.setPartitionCount(partitionColumnCount); + return metadataTableSummary; + } + } +} + diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/IcebergTableMetadataHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/IcebergTableMetadataHandler.java new file mode 100644 index 000000000000..d632ac33a8c8 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/IcebergTableMetadataHandler.java @@ -0,0 +1,107 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.tools.metatool; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.lang.reflect.InvocationTargetException; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +public class IcebergTableMetadataHandler { + static final private Logger LOG = LoggerFactory.getLogger(IcebergTableMetadataHandler.class.getName()); + // "hive" is the catalog name, it is something we can set up by ourselves + private static final String CATALOG_NAME_IN_ICEBERG = "hive"; + private static final String CATALOG_CLASS = "org.apache.iceberg.hive.HiveCatalog"; + private final String mgdWarehouse, extWarehouse, uris; + private final Configuration conf; + private boolean isEnabled = false; + + private static final IcebergReflector IR; + static { + IcebergReflector ir = null; + try { + ir = new IcebergReflector(); + } catch (ClassNotFoundException | InstantiationException | IllegalAccessException | NoSuchMethodException e) { + LOG.warn("Could not find or instantiate class " + CATALOG_CLASS + ", cannot retrieve stats for iceberg tables.", e); + } + IR = ir; + } + + public IcebergTableMetadataHandler(Configuration config) { + this.conf = config; + mgdWarehouse = MetastoreConf.getAsString(this.conf, MetastoreConf.ConfVars.WAREHOUSE); + extWarehouse = MetastoreConf.getAsString(this.conf, MetastoreConf.ConfVars.WAREHOUSE_EXTERNAL); + uris = MetastoreConf.getAsString(this.conf, MetastoreConf.ConfVars.THRIFT_URIS); + } + + public boolean isEnabled() { + if (!isEnabled) { + isEnabled = IR != null; + } + return isEnabled; + } + + + // create a HiveCatalog instance, use it to get all iceberg table identifiers + public Map getIcebergTables() { + Map metadataSummaryMap = new HashMap<>(); + if (!isEnabled()) return metadataSummaryMap; + + try { + IcebergReflector.CatalogHandle catalog = IR.newCatalog(); + catalog.setConf(conf); + //catalog.setConf(conf); + Map propertiesMap = new HashMap<>(); + + // get iceberg properties and print them out to check + LOG.info("Initializing iceberg handler with properties: warehouse:{} external warehouse:{} thrift uris:{}", + mgdWarehouse, extWarehouse, uris); + propertiesMap.put("warehouse", mgdWarehouse); + propertiesMap.put("externalwarehouse", extWarehouse); + propertiesMap.put("uri", uris); + catalog.initialize(CATALOG_NAME_IN_ICEBERG, propertiesMap); + + // get all the name spaces + Collection listOfNamespaces = catalog.listNamespaces(); + for (Object namespace : listOfNamespaces) { + Collection identifierList = catalog.listTables(namespace); + if (identifierList.isEmpty()) + continue; + for (Object tblId : identifierList) { + // fetch the metadata information for every iceberg table + Object tbl = catalog.loadTable(tblId); + MetadataTableSummary metadataTableSummary = catalog.collectMetadata(tbl); + String tableName 
= metadataTableSummary.getTblName(); + + metadataSummaryMap.putIfAbsent(tableName, metadataTableSummary); + LOG.debug("Adding table: {} {}", tableName, metadataTableSummary); + } + } + + } catch (InvocationTargetException | IllegalAccessException | NoSuchMethodException e) { + isEnabled = false; + LOG.warn("Could not find or instantiate class " + CATALOG_CLASS + ", cannot retrieve stats for iceberg tables."); + } + return metadataSummaryMap; + } +} \ No newline at end of file diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/MetaToolTaskMetadataSummary.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/MetaToolTaskMetadataSummary.java new file mode 100644 index 000000000000..bab0f4a0fbda --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/MetaToolTaskMetadataSummary.java @@ -0,0 +1,195 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore.tools.metatool; + +import com.google.gson.Gson; +import com.google.gson.GsonBuilder; +import org.apache.hadoop.hive.metastore.ObjectStore; + +import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.sql.SQLException; +import java.util.List; + +public class MetaToolTaskMetadataSummary extends MetaToolTask { + @Override + void execute() { + try { + String[] inputParams = getCl().getMetadataSummaryParams(); + ObjectStore objectStore = getObjectStore(); + List tableSummariesList = objectStore.getMetadataSummary(null, null, null); + if (tableSummariesList == null || tableSummariesList.size() == 0) { + System.out.println("Return set of tables is empty or null"); + return; + } + String filename = null; + if (inputParams.length == 2) { + filename = inputParams[1].toLowerCase().trim(); + } + String formatOption = inputParams[0].toLowerCase().trim(); + switch (formatOption) { + case "-json": + exportInJson(tableSummariesList, filename); + break; + case "-console": + printToConsole(tableSummariesList); + break; + case "-csv": + exportInCsv(tableSummariesList, filename); + break; + default: + System.out.println("Invalid option to -metadataSummary"); + return; + } + } catch (SQLException e) { + System.out.println("Generating HMS Summary failed: \n" + e.getMessage()); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Exporting the MetadataSummary in JSON format. 
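To make the shape of this export concrete, here is a small self-contained illustration of the Gson serialization used by the task; the values are invented and MetadataTableSummary is the bean added later in this patch:

    import com.google.gson.GsonBuilder;
    import org.apache.hadoop.hive.metastore.tools.metatool.MetadataTableSummary;

    public class SummaryJsonDemo {
      public static void main(String[] args) {
        MetadataTableSummary s = new MetadataTableSummary();
        s.setCtlgName("hive");                 // invented sample values
        s.setDbName("default");
        s.setTblName("t1");
        s.setColCount(3);
        // Pretty-printed JSON keyed by the bean field names, for example:
        // { "ctlgName": "hive", "dbName": "default", "tblName": "t1", "colCount": 3, ... }
        System.out.println(new GsonBuilder().setPrettyPrinting().create().toJson(s));
      }
    }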
+ * @param tableSummaryList + * @param filename fully qualified path of the output file + */ + public void exportInJson(List tableSummaryList, String filename) throws IOException { + Gson gson = new GsonBuilder().setPrettyPrinting().create(); + String jsonOutput = gson.toJson(tableSummaryList); + writeJsonInFile(jsonOutput, filename); + } + + /** + * Exporting the MetadataSummary in CONSOLE. + * @param tableSummariesList + */ + public void printToConsole(List tableSummariesList) { + System.out.println("---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- ---- LEGEND ----- ---- ---- ---- --- ----- ---- ---- ---- -----"); + System.out.print("\033[0;1m#COLS\033[0m "); + System.out.print("--> # of columns in the table "); + System.out.print("\033[0;1m#PARTS\033[0m "); + System.out.print("--> # of Partitions "); + System.out.print("\033[0;1m#ROWS\033[0m "); + System.out.print("--> # of rows in table "); + System.out.print("\033[0;1m#FILES\033[0m "); + System.out.print("--> No of files in table "); + System.out.print("\033[0;1mSIZE\033[0m "); + System.out.print("--> Size of table in bytes "); + System.out.print("\033[0;1m#PCOLS\033[0m "); + System.out.print("--> # of partition columns "); + System.out.print("\033[0;1m#ARR\033[0m "); + System.out.print("--> # of array columns "); + System.out.print("\033[0;1m#STRT\033[0m "); + System.out.print("--> # of struct columns "); + System.out.print("\033[0;1m#MAP\033[0m "); + System.out.print("--> # of map columns "); + System.out.println(""); + System.out.println("-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"); + System.out.println(" Metadata Summary "); + System.out.println("-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"); + System.out.print("\033[0;1m"); + System.out.printf("%10s %20s %30s %5s %5s %15s %15s %10s %10s %10s %10s %10s %5s %5s %5s", "CATALOG", "DATABASE", "TABLE NAME", "#COLS", + "#PARTS", "TYPE", "FORMAT", "COMPRESSION", "#ROWS", "#FILES", "SIZE(b)", "#PCOLS", "#ARR", "#STRT", "#MAP"); + System.out.print("\033[0m"); + System.out.println(); + System.out.println("------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"); + for(MetadataTableSummary summary: tableSummariesList) { + System.out.format("%10s %20s %30s %5d %5d %15s %15s %10s %10s %10s %10s %10s %5d %5d %5d", summary.getCtlgName(), summary.getDbName(), + summary.getTblName(), summary.getColCount(), summary.getPartitionCount(), summary.getTableType(), summary.getFileFormat(), + summary.getCompressionType(), summary.getSizeNumRows(), summary.getSizeNumFiles(), summary.getTotalSize(), summary.getPartitionColumnCount(), summary.getArrayColumnCount(), summary.getStructColumnCount(), summary.getMapColumnCount()); + System.out.println(); + } + System.out.println("------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------"); + } + + /** + * Exporting the MetadataSummary in JSON format. 
+ * @param metadataTableSummaryList List of Summary Objects to be printed + * @param filename Fully qualified name of the output file + */ + public void exportInCsv(List metadataTableSummaryList, String filename) throws IOException { + if (filename == null || filename.trim().isEmpty()) { + filename = "./MetastoreSummary.csv"; + } + + PrintWriter pw = null; + File csvOutputFile = null; + try { + csvOutputFile = new File(filename); + pw = new PrintWriter(csvOutputFile); + // print the header + pw.println("Catalog Name, Database Name, Table Name, Column Count, Partition Count, Table Type, File Format, " + + "Compression Type, Number of Rows, Number of Files, Size in Bytes, Partition Column Count, Array Column Count, Struct Column Count, Map Column Count"); + metadataTableSummaryList.stream() + .map(MetadataTableSummary::toCSV) + .forEach(pw::println); + } catch(IOException e) { + System.out.println("IOException occurred: " + e); + throw e; + } finally { + pw.flush(); + pw.close(); + } + } + + /** + * Helper method of exportInJson. + * @param jsonOutput A string, JSON formatted string about metadataSummary. + * @param filename Path of a file in String where the summary needs to be output to. + */ + private void writeJsonInFile(String jsonOutput, String filename) throws IOException { + File jsonOutputFile; + if (filename == null || filename.trim().isEmpty()) { + filename = "./MetastoreSummary.json"; + } + + try { + jsonOutputFile = new File(filename); + if (jsonOutputFile.exists()) { + File oldFile = new File(jsonOutputFile.getAbsolutePath() + "_old"); + System.out.println("Output file already exists, renaming to " + oldFile); + jsonOutputFile.renameTo(oldFile); + } + if (jsonOutputFile.createNewFile()) { + System.out.println("File created: " + jsonOutputFile.getName()); + } else { + System.out.println("File already exists."); + } + } catch (IOException e) { + System.out.println("IOException occurred: " + e); + throw e; + } + + // Try block to check for exceptions + try { + PrintWriter pw = new PrintWriter(jsonOutputFile); + pw.println(jsonOutput); + pw.flush(); + System.out.println("Summary written to " + jsonOutputFile); + } catch (IOException ex) { + // Print message as exception occurred when invalid path of local machine is passed + System.out.println("Failed to write output file:" + ex.getMessage()); + throw ex; + } + } +} \ No newline at end of file diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/MetadataTableSummary.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/MetadataTableSummary.java new file mode 100644 index 000000000000..5cbdc553949a --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/tools/metatool/MetadataTableSummary.java @@ -0,0 +1,226 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.tools.metatool; + +import java.math.BigInteger; + +public class MetadataTableSummary { + private String ctlgName; + private String dbName; + private String tblName; + private int colCount; + private int partitionColumnCount; + private int partitionCount; + private BigInteger totalSize; + private BigInteger sizeNumRows; + private BigInteger sizeNumFiles; + private String tableType; + private String fileFormat; + private String compressionType; + private int arrayColumnCount; + private int structColumnCount; + private int mapColumnCount; + + + public MetadataTableSummary(String ctlgName, String dbName, String tblName, int colCount, + int partitionColumnCount, int partitionCount, BigInteger totalSize, BigInteger sizeNumRows, + BigInteger sizeNumFiles, String tableType, String fileFormat, String compressionType, + int arrayColumnCount, int structColumnCount, int mapColumnCount) { + this.ctlgName = ctlgName; + this.dbName = dbName; + this.tblName = tblName; + this.colCount = colCount; + this.partitionColumnCount = partitionColumnCount; + this.partitionCount = partitionCount; + this.totalSize = totalSize; + this.sizeNumRows = sizeNumRows; + this.sizeNumFiles = sizeNumFiles; + this.tableType = tableType; + this.fileFormat = fileFormat; + this.compressionType = compressionType; + this.arrayColumnCount = arrayColumnCount; + this.structColumnCount = structColumnCount; + this.mapColumnCount = mapColumnCount; + } + + public MetadataTableSummary() { } + + public String getCtlgName() { + return ctlgName; + } + + public void setCtlgName(String ctlgName) { + this.ctlgName = ctlgName; + } + + public String getDbName() { + return dbName; + } + + public void setDbName(String dbName) { + this.dbName = dbName; + } + + public String getTblName() { + return tblName; + } + + public void setTblName(String tblName) { + this.tblName = tblName; + } + + public int getColCount() { + return colCount; + } + + public void setColCount(int colCount) { + this.colCount = colCount; + } + + public int getPartitionColumnCount() { + return partitionColumnCount; + } + + public void setPartitionColumnCount(int partitionColumnCount) { + this.partitionColumnCount = partitionColumnCount; + } + + public int getPartitionCount() { + return partitionCount; + } + + public void setPartitionCount(int partitionCount) { + this.partitionCount = partitionCount; + } + + public BigInteger getTotalSize() { + return totalSize; + } + + public void setTotalSize(BigInteger totalSize) { + this.totalSize = totalSize; + } + + public BigInteger getSizeNumRows() { + return sizeNumRows; + } + + public void setSizeNumRows(BigInteger sizeNumRows) { + this.sizeNumRows = sizeNumRows; + } + + public BigInteger getSizeNumFiles() { + return sizeNumFiles; + } + + public void setSizeNumFiles(BigInteger sizeNumFiles) { + this.sizeNumFiles = sizeNumFiles; + } + + public String getTableType() { + return tableType; + } + + public void setTableType(String tableType) { + this.tableType = tableType; + } + + public String getFileFormat() { + return fileFormat; + } + + public void setFileFormat(String fileFormat) { + 
this.fileFormat = fileFormat; + } + + public String getCompressionType() { + return compressionType; + } + + public void setCompressionType(String compressionType) { + this.compressionType = compressionType; + } + + public void setArrayColumnCount(int arrayColumnCount) { + this.arrayColumnCount = arrayColumnCount; + } + + public int getArrayColumnCount() { + return arrayColumnCount; + } + + public void setStructColumnCount(int structColumnCount) { + this.structColumnCount = structColumnCount; + } + + public int getStructColumnCount() { + return structColumnCount; + } + + public void setMapColumnCount(int mapColumnCount) { + this.mapColumnCount = mapColumnCount; + } + + public int getMapColumnCount() { + return mapColumnCount; + } + + + + + @Override + public String toString() { + return "TableSummary {" + + "cat_name='" + ctlgName + '\'' + + ", db_name='" + dbName + '\'' + + ", table_name='" + tblName + '\'' + + ", column_count=" + colCount + + ", partition_count=" + partitionCount + + ", table_type='" + tableType + '\'' + + ", file_format='" + fileFormat + '\'' + + ", compression_type='" + compressionType + + ", size_numRows=" + sizeNumRows + + ", size_numFiles=" + sizeNumFiles + + ", size_bytes=" + totalSize + + ", partition_column_count=" + partitionColumnCount + + ", array_column_count=" + arrayColumnCount + + ", struct_column_count=" + structColumnCount + + ", map_column_count=" + mapColumnCount + + '}'; + } + + public String toCSV() { + return new StringBuilder() + .append(ctlgName).append(",") + .append(dbName).append(",") + .append(tblName).append(",") + .append(colCount).append(",") + .append(partitionCount).append(",") + .append(tableType).append(",") + .append(fileFormat).append(",") + .append(compressionType).append(",") + .append(sizeNumRows).append(",") + .append(sizeNumFiles).append(",") + .append(totalSize).append(",") + .append(partitionColumnCount).append(",") + .append(arrayColumnCount).append(",") + .append(structColumnCount).append(",") + .append(mapColumnCount).append(",") + .toString(); + } +} \ No newline at end of file diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionMetricsDataConverter.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionMetricsDataConverter.java index e579311d3687..531ae67fcfaa 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionMetricsDataConverter.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionMetricsDataConverter.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hive.metastore.api.CompactionMetricsDataStruct; import org.apache.hadoop.hive.metastore.api.CompactionMetricsMetricType; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionMetricsData; public class CompactionMetricsDataConverter { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java index 6bde27a47597..d3b6091574a2 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java @@ -17,45 +17,42 @@ */ package 
org.apache.hadoop.hive.metastore.txn; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.common.classification.RetrySemantics; -import org.apache.hadoop.hive.metastore.MetaStoreListenerNotifier; import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.FindNextCompactRequest; import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.TxnType; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; -import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider; -import org.apache.hadoop.hive.metastore.events.CommitCompactionEvent; -import org.apache.hadoop.hive.metastore.messaging.EventMessage; -import org.apache.hadoop.hive.metastore.txn.impl.CleanTxnToWriteIdTableFunction; -import org.apache.hadoop.hive.metastore.txn.impl.FindPotentialCompactionsFunction; -import org.apache.hadoop.hive.metastore.txn.impl.NextCompactionFunction; -import org.apache.hadoop.hive.metastore.txn.impl.ReadyToCleanAbortHandler; -import org.apache.hadoop.hive.metastore.txn.impl.CheckFailedCompactionsHandler; -import org.apache.hadoop.hive.metastore.txn.impl.CompactionMetricsDataHandler; -import org.apache.hadoop.hive.metastore.txn.impl.FindColumnsWithStatsHandler; -import org.apache.hadoop.hive.metastore.txn.impl.GetCompactionInfoHandler; -import org.apache.hadoop.hive.metastore.txn.impl.InsertCompactionInfoCommand; -import org.apache.hadoop.hive.metastore.txn.impl.MarkCleanedFunction; -import org.apache.hadoop.hive.metastore.txn.impl.PurgeCompactionHistoryFunction; -import org.apache.hadoop.hive.metastore.txn.impl.ReadyToCleanHandler; -import org.apache.hadoop.hive.metastore.txn.impl.RemoveCompactionMetricsDataCommand; -import org.apache.hadoop.hive.metastore.txn.impl.RemoveDuplicateCompleteTxnComponentsCommand; -import org.apache.hadoop.hive.metastore.txn.impl.TopCompactionMetricsDataPerTypeFunction; -import org.apache.hadoop.hive.metastore.txn.impl.UpdateCompactionMetricsDataFunction; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionMetricsData; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionState; +import org.apache.hadoop.hive.metastore.txn.entities.OperationType; +import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus; +import org.apache.hadoop.hive.metastore.txn.jdbc.commands.InsertCompactionInfoCommand; +import org.apache.hadoop.hive.metastore.txn.jdbc.commands.RemoveCompactionMetricsDataCommand; +import org.apache.hadoop.hive.metastore.txn.jdbc.commands.RemoveDuplicateCompleteTxnComponentsCommand; +import org.apache.hadoop.hive.metastore.txn.jdbc.functions.CleanTxnToWriteIdTableFunction; +import org.apache.hadoop.hive.metastore.txn.jdbc.functions.FindPotentialCompactionsFunction; +import org.apache.hadoop.hive.metastore.txn.jdbc.functions.GenerateCompactionQueueIdFunction; +import org.apache.hadoop.hive.metastore.txn.jdbc.functions.MarkCleanedFunction; +import org.apache.hadoop.hive.metastore.txn.jdbc.functions.MinOpenTxnIdWaterMarkFunction; +import org.apache.hadoop.hive.metastore.txn.jdbc.functions.NextCompactionFunction; +import org.apache.hadoop.hive.metastore.txn.jdbc.functions.PurgeCompactionHistoryFunction; +import org.apache.hadoop.hive.metastore.txn.jdbc.functions.TopCompactionMetricsDataPerTypeFunction; +import 
org.apache.hadoop.hive.metastore.txn.jdbc.functions.UpdateCompactionMetricsDataFunction; +import org.apache.hadoop.hive.metastore.txn.jdbc.queries.CheckFailedCompactionsHandler; +import org.apache.hadoop.hive.metastore.txn.jdbc.queries.CompactionMetricsDataHandler; +import org.apache.hadoop.hive.metastore.txn.jdbc.queries.FindColumnsWithStatsHandler; +import org.apache.hadoop.hive.metastore.txn.jdbc.queries.GetCompactionInfoHandler; +import org.apache.hadoop.hive.metastore.txn.jdbc.queries.OpenTxnTimeoutLowBoundaryTxnIdHandler; +import org.apache.hadoop.hive.metastore.txn.jdbc.queries.ReadyToCleanAbortHandler; +import org.apache.hadoop.hive.metastore.txn.jdbc.queries.ReadyToCleanHandler; import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedCommand; -import org.apache.hadoop.hive.metastore.txn.retryhandling.SqlRetryHandler; +import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryHandler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.dao.DataAccessException; -import org.springframework.jdbc.UncategorizedSQLException; import org.springframework.jdbc.core.namedparam.MapSqlParameterSource; -import java.sql.Connection; -import java.sql.SQLException; -import java.sql.Statement; import java.sql.Types; import java.util.List; import java.util.Optional; @@ -73,26 +70,9 @@ class CompactionTxnHandler extends TxnHandler { private static final Logger LOG = LoggerFactory.getLogger(CompactionTxnHandler.class.getName()); - private static boolean initialized = false; - public CompactionTxnHandler() { } - @Override - public void setConf(Configuration conf) { - super.setConf(conf); - synchronized (CompactionTxnHandler.class) { - if (!initialized) { - int maxPoolSize = MetastoreConf.getIntVar(conf, ConfVars.HIVE_COMPACTOR_CONNECTION_POOLING_MAX_CONNECTIONS); - try (DataSourceProvider.DataSourceNameConfigurator configurator = - new DataSourceProvider.DataSourceNameConfigurator(conf, "compactor")) { - jdbcResource.registerDataSource(POOL_COMPACTOR, setupJdbcConnectionPool(conf, maxPoolSize)); - initialized = true; - } - } - } - } - /** * This will look through the completed_txn_components table and look for partitions or tables * that may be ready for compaction. 
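A reading guide for the refactor in this class: the inlined JDBC code is replaced by small query handlers, multi-statement functions, and parameterized statements that all run against the shared jdbcResource. The three shapes, excerpted from the methods below with arguments shortened:

    // query handler: the resource executes it and maps the result
    CompactionInfo ci = jdbcResource.execute(new GetCompactionInfoHandler(txnId, true));
    // function: encapsulates multi-statement logic and is handed the resource
    new MarkCleanedFunction(info).execute(jdbcResource);
    // parameterized statement: named-parameter SQL plus a MapSqlParameterSource
    jdbcResource.execute("DELETE FROM \"TXNS\" WHERE ...", new MapSqlParameterSource().addValue("txnId", txnId), null);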
Also, look through txns and txn_components tables for @@ -143,7 +123,7 @@ public CompactionInfo findNextToCompact(FindNextCompactRequest rqst) throws Meta if (rqst == null) { throw new MetaException("FindNextCompactRequest is null"); } - long poolTimeout = MetastoreConf.getTimeVar(conf, ConfVars.COMPACTOR_WORKER_POOL_TIMEOUT, TimeUnit.MILLISECONDS); + long poolTimeout = MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.COMPACTOR_WORKER_POOL_TIMEOUT, TimeUnit.MILLISECONDS); return new NextCompactionFunction(rqst, getDbTime(), poolTimeout).execute(jdbcResource); } @@ -172,7 +152,7 @@ public void markCompacted(CompactionInfo info) throws MetaException { @Override @RetrySemantics.ReadOnly public List findReadyToClean(long minOpenTxnWaterMark, long retentionTime) throws MetaException { - return jdbcResource.execute(new ReadyToCleanHandler(conf, useMinHistoryWriteId, minOpenTxnWaterMark, retentionTime)); + return jdbcResource.execute(new ReadyToCleanHandler(conf, minOpenTxnWaterMark, retentionTime)); } @Override @@ -227,7 +207,7 @@ private void setCleanerStart(CompactionInfo info, Long timestamp) throws MetaExc @RetrySemantics.CannotRetry public void markCleaned(CompactionInfo info) throws MetaException { LOG.debug("Running markCleaned with CompactionInfo: {}", info); - new MarkCleanedFunction(info, conf).execute(jdbcResource); + new MarkCleanedFunction(info).execute(jdbcResource); } /** @@ -237,7 +217,7 @@ public void markCleaned(CompactionInfo info) throws MetaException { @Override @RetrySemantics.SafeToRetry public void cleanTxnToWriteIdTable() throws MetaException { - new CleanTxnToWriteIdTableFunction(useMinHistoryLevel, findMinTxnIdSeenOpen()).execute(jdbcResource); + new CleanTxnToWriteIdTableFunction(findMinTxnIdSeenOpen()).execute(jdbcResource); } @Override @@ -264,7 +244,7 @@ public void cleanEmptyAbortedAndCommittedTxns() throws MetaException { * 2. 
never deletes the maximum txnId even if it is before the TXN_OPENTXN_TIMEOUT window */ try { - long lowWaterMark = getOpenTxnTimeoutLowBoundaryTxnId(jdbcResource.getConnection()); + long lowWaterMark = jdbcResource.execute(new OpenTxnTimeoutLowBoundaryTxnIdHandler(openTxnTimeOutMillis)); jdbcResource.execute( "DELETE FROM \"TXNS\" WHERE \"TXN_ID\" NOT IN (SELECT \"TC_TXNID\" FROM \"TXN_COMPONENTS\") " + "AND (\"TXN_STATE\" = :abortedState OR \"TXN_STATE\" = :committedState) AND \"TXN_ID\" < :txnId", @@ -273,7 +253,7 @@ public void cleanEmptyAbortedAndCommittedTxns() throws MetaException { .addValue("abortedState", TxnStatus.ABORTED.getSqlConst(), Types.CHAR) .addValue("committedState", TxnStatus.COMMITTED.getSqlConst(), Types.CHAR), null); - } catch (SQLException e) { + } catch (DataAccessException e) { throw new MetaException("Unable to get the txn id: " + SqlRetryHandler.getMessage(e)); } } @@ -390,7 +370,7 @@ public void updateCompactorState(CompactionInfo ci, long compactionTxnId) throws @Override @RetrySemantics.SafeToRetry public void purgeCompactionHistory() throws MetaException { - new PurgeCompactionHistoryFunction(conf).execute(jdbcResource); + new PurgeCompactionHistoryFunction().execute(jdbcResource); } /** @@ -431,7 +411,7 @@ private void updateStatus(CompactionInfo ci) throws MetaException { if (ciActual.id == 0) { //The failure occurred before we even made an entry in COMPACTION_QUEUE //generate ID so that we can make an entry in COMPLETED_COMPACTIONS - ciActual.id = generateCompactionQueueId(); + ciActual.id = new GenerateCompactionQueueIdFunction().execute(jdbcResource); //this is not strictly accurate, but 'type' cannot be null. if (ciActual.type == null) { ciActual.type = CompactionType.MINOR; @@ -480,7 +460,7 @@ public void setCleanerRetryRetentionTimeOnError(CompactionInfo info) throws Meta * compactions for any resource. */ try (TxnStore.MutexAPI.LockHandle ignored = getMutexAPI().acquireLock(MUTEX_KEY.CompactionScheduler.name())) { - long id = generateCompactionQueueId(); + long id = new GenerateCompactionQueueIdFunction().execute(jdbcResource); int updCnt = jdbcResource.execute( "INSERT INTO \"COMPACTION_QUEUE\" (\"CQ_ID\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", " + " \"CQ_TYPE\", \"CQ_STATE\", \"CQ_RETRY_RETENTION\", \"CQ_ERROR_MESSAGE\", \"CQ_COMMIT_TIME\") " + @@ -527,14 +507,10 @@ public void setHadoopJobId(String hadoopJobId, long id) throws MetaException { @Override @RetrySemantics.Idempotent public long findMinOpenTxnIdForCleaner() throws MetaException { - if (useMinHistoryWriteId) { + if (ConfVars.useMinHistoryWriteId()) { return Long.MAX_VALUE; } - try { - return getMinOpenTxnIdWaterMark(jdbcResource.getConnection()); - } catch (SQLException e) { - throw new UncategorizedSQLException(null, null, e); - } + return new MinOpenTxnIdWaterMarkFunction(openTxnTimeOutMillis).execute(jdbcResource); } /** @@ -546,7 +522,7 @@ public long findMinOpenTxnIdForCleaner() throws MetaException { @RetrySemantics.Idempotent @Deprecated public long findMinTxnIdSeenOpen() { - if (!useMinHistoryLevel || useMinHistoryWriteId) { + if (!ConfVars.useMinHistoryLevel() || ConfVars.useMinHistoryWriteId()) { return Long.MAX_VALUE; } try { @@ -554,53 +530,20 @@ public long findMinTxnIdSeenOpen() { new MapSqlParameterSource(), Long.class); return minId == null ? 
Long.MAX_VALUE : minId; } catch (DataAccessException e) { - if (e.getCause() instanceof SQLException) { - if (dbProduct.isTableNotExistsError((SQLException) e.getCause())) { - useMinHistoryLevel = false; - return Long.MAX_VALUE; - } + if (dbProduct.isTableNotExistsError(e)) { + ConfVars.setUseMinHistoryLevel(false); + return Long.MAX_VALUE; } LOG.error("Unable to execute findMinTxnIdSeenOpen", e); throw e; } } - @Override - protected void updateWSCommitIdAndCleanUpMetadata(Statement stmt, long txnid, TxnType txnType, - Long commitId, long tempId) throws SQLException, MetaException { - super.updateWSCommitIdAndCleanUpMetadata(stmt, txnid, txnType, commitId, tempId); - - if (txnType == TxnType.SOFT_DELETE || txnType == TxnType.COMPACTION) { - stmt.executeUpdate("UPDATE \"COMPACTION_QUEUE\" SET \"CQ_NEXT_TXN_ID\" = " + commitId + ", \"CQ_COMMIT_TIME\" = " + - getEpochFn(dbProduct) + " WHERE \"CQ_TXN_ID\" = " + txnid); - } - } - @Override public Optional getCompactionByTxnId(long txnId) throws MetaException { return Optional.ofNullable(jdbcResource.execute(new GetCompactionInfoHandler(txnId, true))); } - @Override - protected void createCommitNotificationEvent(Connection conn, long txnid, TxnType txnType) - throws MetaException, SQLException { - super.createCommitNotificationEvent(conn, txnid, txnType); - if (transactionalListeners != null) { - //Please note that TxnHandler and CompactionTxnHandler are using different DataSources (to have different pools). - //This call must use the same transaction and connection as TxnHandler.commitTxn(), therefore we are passing the - //datasource wrapper comming from TxnHandler. Without this, the getCompactionByTxnId(long txnId) call would be - //executed using a different connection obtained from CompactionTxnHandler's own datasourceWrapper. - CompactionInfo compactionInfo = getCompactionByTxnId(txnid).orElse(null); - if (compactionInfo != null) { - MetaStoreListenerNotifier - .notifyEventWithDirectSql(transactionalListeners, EventMessage.EventType.COMMIT_COMPACTION, - new CommitCompactionEvent(txnid, compactionInfo), conn, sqlGenerator); - } else { - LOG.warn("No compaction queue record found for Compaction type transaction commit. txnId:" + txnid); - } - } - } - @Override public boolean updateCompactionMetricsData(CompactionMetricsData data) throws MetaException { return new UpdateCompactionMetricsDataFunction(data).execute(jdbcResource); @@ -609,7 +552,7 @@ public boolean updateCompactionMetricsData(CompactionMetricsData data) throws Me @Override public List getTopCompactionMetricsDataPerType(int limit) throws MetaException { - return new TopCompactionMetricsDataPerTypeFunction(limit, sqlGenerator).execute(jdbcResource); + return new TopCompactionMetricsDataPerTypeFunction(limit).execute(jdbcResource); } @Override diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/DefaultTxnLockManager.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/DefaultTxnLockManager.java new file mode 100644 index 000000000000..39b760d016ee --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/DefaultTxnLockManager.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.txn; + +import org.apache.commons.collections.CollectionUtils; +import org.apache.hadoop.hive.metastore.api.LockRequest; +import org.apache.hadoop.hive.metastore.api.LockResponse; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchLockException; +import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; +import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; +import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; +import org.apache.hadoop.hive.metastore.api.TxnAbortedException; +import org.apache.hadoop.hive.metastore.api.TxnOpenException; +import org.apache.hadoop.hive.metastore.api.UnlockRequest; +import org.apache.hadoop.hive.metastore.txn.entities.LockInfo; +import org.apache.hadoop.hive.metastore.txn.jdbc.functions.CheckLockFunction; +import org.apache.hadoop.hive.metastore.txn.jdbc.functions.EnqueueLockFunction; +import org.apache.hadoop.hive.metastore.txn.jdbc.queries.GetLocksByLockId; +import org.apache.hadoop.hive.metastore.txn.jdbc.queries.ShowLocksHandler; +import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource; +import org.apache.hadoop.hive.metastore.txn.jdbc.RollbackException; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.jdbc.core.namedparam.MapSqlParameterSource; + +import java.sql.Types; +import java.util.List; + +import static org.apache.hadoop.hive.metastore.txn.entities.LockInfo.LOCK_WAITING; + +public class DefaultTxnLockManager implements TxnLockManager { + + private static final Logger LOG = LoggerFactory.getLogger(DefaultTxnLockManager.class); + + private final MultiDataSourceJdbcResource jdbcResource; + + public DefaultTxnLockManager(MultiDataSourceJdbcResource jdbcResource) { + this.jdbcResource = jdbcResource; + } + + @Override + public long enqueueLock(LockRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException { + return new EnqueueLockFunction(rqst).execute(jdbcResource); + } + + @Override + public LockResponse checkLock(long extLockId, long txnId, boolean zeroWaitReadEnabled, boolean isExclusiveCTAS) + throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException { + return new CheckLockFunction(extLockId, txnId, zeroWaitReadEnabled, isExclusiveCTAS).execute(jdbcResource); + } + + @Override + public void unlock(UnlockRequest rqst) throws TxnOpenException, MetaException { + long extLockId = rqst.getLockid(); + /** + * This method is logically like commit for read-only auto commit queries. + * READ_COMMITTED since this only has 1 delete statement and no new entries with the + * same hl_lock_ext_id can be added, i.e. all rows with a given hl_lock_ext_id are + * created in a single atomic operation. 
+ * Theoretically, this competes with {@link #lock(org.apache.hadoop.hive.metastore.api.LockRequest)} + * but hl_lock_ext_id is not known until that method returns. + * Also competes with {@link #checkLock(org.apache.hadoop.hive.metastore.api.CheckLockRequest)} + * but using SERIALIZABLE doesn't materially change the interaction. + * If "delete" stmt misses, additional logic is best effort to produce meaningful error msg. + */ + //hl_txnid <> 0 means it's associated with a transaction + int rc = jdbcResource.getJdbcTemplate().update("DELETE FROM \"HIVE_LOCKS\" WHERE \"HL_LOCK_EXT_ID\" = :extLockId " + + " AND (\"HL_TXNID\" = 0 OR (\"HL_TXNID\" <> 0 AND \"HL_LOCK_STATE\" = :state))", + new MapSqlParameterSource() + .addValue("extLockId", extLockId) + .addValue("state", Character.toString(LOCK_WAITING), Types.CHAR)); + //(hl_txnid <> 0 AND hl_lock_state = '" + LOCK_WAITING + "') is for multi-statement txns where + //some query attempted to lock (thus LOCK_WAITING state) but is giving up due to timeout for example + if (rc < 1) { + LOG.info("Failure to unlock any locks with extLockId={}.", extLockId); + List lockInfos = jdbcResource.execute(new GetLocksByLockId(extLockId, 1, jdbcResource.getSqlGenerator())); + if (CollectionUtils.isEmpty(lockInfos)) { + //didn't find any lock with extLockId but at ReadCommitted there is a possibility that + //it existed when above delete ran but it didn't have the expected state. + LOG.info("No lock in {} mode found for unlock({})", LOCK_WAITING, + JavaUtils.lockIdToString(rqst.getLockid())); + + //bail here to make the operation idempotent + throw new RollbackException(null); + } + LockInfo lockInfo = lockInfos.get(0); + if (TxnUtils.isValidTxn(lockInfo.getTxnId())) { + String msg = "Unlocking locks associated with transaction not permitted. " + lockInfo; + //if a lock is associated with a txn we can only "unlock" it if it's in WAITING state + // which really means that the caller wants to give up waiting for the lock + LOG.error(msg); + throw new TxnOpenException(msg); + } else { + //we didn't see this lock when running DELETE stmt above but now it showed up + //so should "should never happen" happened... 
+ String msg = "Found lock in unexpected state " + lockInfo; + LOG.error(msg); + throw new MetaException(msg); + } + } + LOG.debug("Successfully unlocked at least 1 lock with extLockId={}", extLockId); + } + + @Override + public ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException { + return jdbcResource.execute(new ShowLocksHandler(rqst)); + } + +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/ProxyTxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TransactionalRetryProxy.java similarity index 61% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/ProxyTxnHandler.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TransactionalRetryProxy.java index 10a6e9b7654f..0b85b450f3c8 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/ProxyTxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TransactionalRetryProxy.java @@ -17,16 +17,19 @@ */ package org.apache.hadoop.hive.metastore.txn; +import org.apache.commons.lang3.ClassUtils; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource; +import org.apache.hadoop.hive.metastore.txn.jdbc.RollbackException; import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionContext; -import org.apache.hadoop.hive.metastore.txn.retryhandling.SqlRetryCallProperties; -import org.apache.hadoop.hive.metastore.txn.retryhandling.SqlRetry; -import org.apache.hadoop.hive.metastore.txn.retryhandling.SqlRetryFunction; -import org.apache.hadoop.hive.metastore.txn.retryhandling.SqlRetryHandler; +import org.apache.hadoop.hive.metastore.txn.retry.SqlRetry; +import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryCallProperties; +import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryFunction; +import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryHandler; +import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.springframework.dao.DataAccessException; import org.springframework.transaction.annotation.Transactional; import java.lang.reflect.InvocationHandler; @@ -37,45 +40,44 @@ import java.util.Arrays; /** - * Responsible for processing the following annotations: {@link SqlRetry} and {@link Transactional}. The annotated methods - * will be called accordingly: + * Responsible for proxying an interface, and calling its methods by processing the following annotations: + * {@link SqlRetry} and {@link Transactional}. The annotated methods will be called accordingly: *
 * <ul>
 *   <li>SQL errors in methods annotated with {@link SqlRetry} will be caught and the method will be re-executed</li>
 *   <li>Methods annotated with {@link Transactional} will be executed after creating a transaction, and all operations done
- * via {@link MultiDataSourceJdbcResource}, {@link org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedQuery},
- * {@link org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedCommand} and
- * {@link org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler} will use the created transaction.</li>
+ * via {@link MultiDataSourceJdbcResource} will use the created transaction.</li>
 *   <li>In case a method is annotated with both annotations, the transaction will be inside the retry-call. This means
 * in case of SQL errors and retries, the transaction will be rolled back and a new one will be created for each retry
 * attempt.</li>
 * </ul>
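// Illustrative sketch (editor's addition, not code from this patch; ExampleLockService, ExampleWiring and the
// POOL_TX reference are assumptions) of the contract the proxy relies on: @SqlRetry marks a method as safe to
// re-execute on transient SQL errors, @Transactional makes the proxy run it inside a transaction bound to the
// named pool, and when both annotations are present each retry attempt gets a fresh transaction, as listed above.
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.txn.TransactionalRetryProxy;
import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
import org.apache.hadoop.hive.metastore.txn.retry.SqlRetry;
import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryHandler;
import org.springframework.transaction.annotation.Transactional;

interface ExampleLockService {
  @SqlRetry
  @Transactional(POOL_TX)   // POOL_TX: the datasource-pool qualifier constant used by the real TxnStore methods (assumed to be in scope)
  void touchLock(long extLockId) throws MetaException;
}

final class ExampleWiring {
  private ExampleWiring() {}

  static ExampleLockService wireLockService(SqlRetryHandler retryHandler,
                                            MultiDataSourceJdbcResource jdbcResource,
                                            ExampleLockService rawImpl) {
    // the proxy picks up every interface of rawImpl, so callers keep programming against the plain interface type
    return TransactionalRetryProxy.getProxy(retryHandler, jdbcResource, rawImpl);
  }
}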
* Not annotated methods are called directly. */ -public class ProxyTxnHandler implements InvocationHandler { +public class TransactionalRetryProxy implements InvocationHandler { - private static final Logger LOG = LoggerFactory.getLogger(ProxyTxnHandler.class); + private static final Logger LOG = LoggerFactory.getLogger(TransactionalRetryProxy.class); /** * Gets the proxy interface for the given {@link TxnStore}. * - * @param realStore The real {@link TxnStore} to proxy. * @param sqlRetryHandler Responsible to re-execute the methods in case of failure. + * @param interfaceObject The real object to proxy. * @return Returns the proxy object capable of retrying the failed calls automatically and transparently. */ - public static TxnStore getProxy(TxnStore realStore, SqlRetryHandler sqlRetryHandler, MultiDataSourceJdbcResource jdbcResourceHandler) { - ProxyTxnHandler handler = new ProxyTxnHandler(realStore, sqlRetryHandler, jdbcResourceHandler); - return (TxnStore) Proxy.newProxyInstance( - ProxyTxnHandler.class.getClassLoader(), - new Class[]{ TxnStore.class }, + public static T getProxy(SqlRetryHandler sqlRetryHandler, MultiDataSourceJdbcResource jdbcResourceHandler, T interfaceObject) { + TransactionalRetryProxy handler = new TransactionalRetryProxy<>(interfaceObject, sqlRetryHandler, jdbcResourceHandler); + //noinspection unchecked + return (T) Proxy.newProxyInstance( + TransactionalRetryProxy.class.getClassLoader(), + ClassUtils.getAllInterfaces(interfaceObject.getClass()).toArray(new Class[0]), handler); } + private final T interfaceObject; private final SqlRetryHandler sqlRetryHandler; - private final TxnStore realStore; private final MultiDataSourceJdbcResource jdbcResource; - private ProxyTxnHandler(TxnStore realStore, SqlRetryHandler sqlRetryHandler, MultiDataSourceJdbcResource jdbcResource) { - this.realStore = realStore; + private TransactionalRetryProxy(T interfaceObject, SqlRetryHandler sqlRetryHandler, MultiDataSourceJdbcResource jdbcResource) { + this.interfaceObject = interfaceObject; this.sqlRetryHandler = sqlRetryHandler; this.jdbcResource = jdbcResource; } @@ -95,7 +97,7 @@ public Object invoke(Object proxy, Method method, Object[] args) throws Throwabl ThrowingSupplier functionToCall = () -> { try { - return method.invoke(realStore, args); + return method.invoke(interfaceObject, args); } catch (InvocationTargetException | UndeclaredThrowableException e) { throw e.getCause(); } @@ -108,23 +110,36 @@ public Object invoke(Object proxy, Method method, Object[] args) throws Throwabl TransactionContext context = null; try { jdbcResource.bindDataSource(transactional); - context = jdbcResource.getTransactionManager().getTransaction(transactional.propagation().value()); + context = jdbcResource.getTransactionManager().getNewTransaction(transactional.propagation().value()); Object result = toCall.execute(); LOG.debug("Successfull method invocation within transactional context: {}, going to commit.", callerId); - jdbcResource.getTransactionManager().commit(context); + if (context.isRollbackOnly()) { + jdbcResource.getTransactionManager().rollback(context); + } else if (!context.isCompleted()) { + jdbcResource.getTransactionManager().commit(context); + } return result; + } catch (RollbackException e) { + if (context != null && !context.isCompleted()) { + jdbcResource.getTransactionManager().rollback(context); + } + return e.getResult(); } catch (Exception e) { - if (Arrays.stream(transactional.noRollbackFor()).anyMatch(ex -> ex.isInstance(e)) || - 
Arrays.stream(transactional.noRollbackForClassName()).anyMatch(exName -> exName.equals(e.getClass().getName()))) { - throw e; - } if (context != null) { - if (transactional.rollbackFor().length > 0 || transactional.rollbackForClassName().length > 0) { + if (transactional.noRollbackFor().length > 0 || transactional.noRollbackForClassName().length > 0) { + if (Arrays.stream(transactional.noRollbackFor()).anyMatch(ex -> ex.isInstance(e)) || + Arrays.stream(transactional.noRollbackForClassName()).anyMatch(exName -> exName.equals(e.getClass().getName()))) { + jdbcResource.getTransactionManager().commit(context); + } else { + jdbcResource.getTransactionManager().rollback(context); + } + } else if (transactional.rollbackFor().length > 0 || transactional.rollbackForClassName().length > 0) { if (Arrays.stream(transactional.rollbackFor()).anyMatch(ex -> ex.isInstance(e)) || Arrays.stream(transactional.rollbackForClassName()).anyMatch(exName -> exName.equals(e.getClass().getName()))) { jdbcResource.getTransactionManager().rollback(context); + } else { + jdbcResource.getTransactionManager().commit(context); } - throw e; } else { jdbcResource.getTransactionManager().rollback(context); } @@ -149,15 +164,23 @@ public Object invoke(Object proxy, Method method, Object[] args) throws Throwabl LOG.debug("Successfull method invocation within retry context: {}", callerId); return result; } catch (IllegalAccessException | InvocationTargetException | UndeclaredThrowableException e) { - if (e.getCause() instanceof MetaException) { - throw (MetaException) e.getCause(); + if (e.getCause() instanceof TException) { + throw (TException) e.getCause(); } else if (e.getCause() instanceof RuntimeException) { throw (RuntimeException) e.getCause(); } else { throw new RuntimeException(e); } - } catch (Throwable e) { - throw new RuntimeException(e); + } catch (TException | DataAccessException e) { + throw e; + } catch (Throwable e) { + if (e instanceof RuntimeException) { + throw (RuntimeException) e; + } else if (e.getCause() instanceof RuntimeException) { + throw (RuntimeException)e.getCause(); + } else { + throw new RuntimeException(e); + } } }; return sqlRetryHandler.executeWithRetry(properties, retryWrapper); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java index c661e34b0739..92437bdbba03 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java @@ -17,76 +17,19 @@ */ package org.apache.hadoop.hive.metastore.txn; -import java.io.PrintWriter; -import java.io.Serializable; -import java.nio.ByteBuffer; -import java.sql.Connection; -import java.sql.Driver; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.SQLFeatureNotSupportedException; -import java.sql.Statement; -import java.sql.Timestamp; -import java.text.MessageFormat; -import java.time.Instant; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.BitSet; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Properties; -import java.util.Set; 
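// Editor's distillation (a sketch, not code from this patch; RollbackPolicy is a hypothetical name) of the
// commit/rollback decision TransactionalRetryProxy.invoke applies above when the proxied method throws:
// a configured noRollbackFor list wins (commit on match, rollback otherwise); otherwise a configured
// rollbackFor list is consulted (rollback on match, commit otherwise); with neither list set, the
// transaction is always rolled back.
import java.util.Arrays;
import org.springframework.transaction.annotation.Transactional;

final class RollbackPolicy {
  private RollbackPolicy() {}

  static boolean shouldRollback(Transactional tx, Exception e) {
    if (tx.noRollbackFor().length > 0 || tx.noRollbackForClassName().length > 0) {
      return !matches(tx.noRollbackFor(), tx.noRollbackForClassName(), e);
    }
    if (tx.rollbackFor().length > 0 || tx.rollbackForClassName().length > 0) {
      return matches(tx.rollbackFor(), tx.rollbackForClassName(), e);
    }
    return true;
  }

  private static boolean matches(Class<? extends Throwable>[] types, String[] names, Exception e) {
    return Arrays.stream(types).anyMatch(t -> t.isInstance(e))
        || Arrays.stream(names).anyMatch(n -> n.equals(e.getClass().getName()));
  }
}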
-import java.util.TreeSet; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.Semaphore; -import java.util.concurrent.ThreadLocalRandom; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Function; -import java.util.function.Predicate; -import java.util.regex.Pattern; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import javax.sql.DataSource; - -import com.google.common.collect.ImmutableList; +import com.google.common.annotations.VisibleForTesting; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.time.StopWatch; -import org.apache.commons.lang3.ArrayUtils; -import org.apache.commons.lang3.NotImplementedException; -import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.common.MaterializationSnapshot; -import org.apache.hadoop.hive.common.TableName; -import org.apache.hadoop.hive.common.ValidCompactorWriteIdList; -import org.apache.hadoop.hive.common.ValidReadTxnList; -import org.apache.hadoop.hive.common.ValidReaderWriteIdList; import org.apache.hadoop.hive.common.ValidTxnList; -import org.apache.hadoop.hive.common.ValidTxnWriteIdList; -import org.apache.hadoop.hive.common.ValidWriteIdList; import org.apache.hadoop.hive.common.classification.RetrySemantics; -import org.apache.hadoop.hive.common.repl.ReplConst; import org.apache.hadoop.hive.metastore.DatabaseProduct; -import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.MetaStoreListenerNotifier; import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener; -import org.apache.hadoop.hive.metastore.LockTypeComparator; import org.apache.hadoop.hive.metastore.api.AbortCompactResponse; import org.apache.hadoop.hive.metastore.api.AbortCompactionRequest; -import org.apache.hadoop.hive.metastore.api.AbortCompactionResponseElement; -import org.apache.hadoop.hive.metastore.api.NoSuchCompactionException; import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest; import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions; @@ -94,14 +37,10 @@ import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse; import org.apache.hadoop.hive.metastore.api.CheckLockRequest; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; -import org.apache.hadoop.hive.metastore.api.CompactionInfoStruct; import org.apache.hadoop.hive.metastore.api.CompactionRequest; import org.apache.hadoop.hive.metastore.api.CompactionResponse; -import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.CreationMetadata; -import org.apache.hadoop.hive.metastore.api.DataOperationType; import org.apache.hadoop.hive.metastore.api.Database; -import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.GetLatestCommittedCompactionInfoRequest; import org.apache.hadoop.hive.metastore.api.GetLatestCommittedCompactionInfoResponse; import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; @@ -112,104 +51,102 @@ import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest; import 
org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse; import org.apache.hadoop.hive.metastore.api.HiveObjectType; -import org.apache.hadoop.hive.metastore.api.LockComponent; import org.apache.hadoop.hive.metastore.api.LockRequest; import org.apache.hadoop.hive.metastore.api.LockResponse; -import org.apache.hadoop.hive.metastore.api.LockState; -import org.apache.hadoop.hive.metastore.api.LockType; import org.apache.hadoop.hive.metastore.api.Materialization; import org.apache.hadoop.hive.metastore.api.MaxAllocatedTableWriteIdRequest; import org.apache.hadoop.hive.metastore.api.MaxAllocatedTableWriteIdResponse; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchCompactionException; import org.apache.hadoop.hive.metastore.api.NoSuchLockException; import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; import org.apache.hadoop.hive.metastore.api.OpenTxnRequest; import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.ReplLastIdInfo; import org.apache.hadoop.hive.metastore.api.ReplTblWriteIdStateRequest; import org.apache.hadoop.hive.metastore.api.SeedTableWriteIdsRequest; import org.apache.hadoop.hive.metastore.api.SeedTxnIdRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; -import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; -import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement; import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.TableValidWriteIds; import org.apache.hadoop.hive.metastore.api.TxnAbortedException; import org.apache.hadoop.hive.metastore.api.TxnOpenException; -import org.apache.hadoop.hive.metastore.api.TxnToWriteId; import org.apache.hadoop.hive.metastore.api.TxnType; import org.apache.hadoop.hive.metastore.api.UnlockRequest; import org.apache.hadoop.hive.metastore.api.UpdateTransactionalStatsRequest; -import org.apache.hadoop.hive.metastore.api.WriteEventInfo; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider; import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory; import org.apache.hadoop.hive.metastore.events.AbortTxnEvent; -import org.apache.hadoop.hive.metastore.events.AllocWriteIdEvent; -import org.apache.hadoop.hive.metastore.events.CommitTxnEvent; -import org.apache.hadoop.hive.metastore.events.ListenerEvent; -import org.apache.hadoop.hive.metastore.events.OpenTxnEvent; import org.apache.hadoop.hive.metastore.events.AcidWriteEvent; +import org.apache.hadoop.hive.metastore.events.ListenerEvent; import org.apache.hadoop.hive.metastore.messaging.EventMessage; import org.apache.hadoop.hive.metastore.metrics.Metrics; import org.apache.hadoop.hive.metastore.metrics.MetricsConstants; import org.apache.hadoop.hive.metastore.tools.SQLGenerator; -import org.apache.hadoop.hive.metastore.txn.impl.InsertCompactionInfoCommand; -import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionContext; -import org.apache.hadoop.hive.metastore.txn.retryhandling.SqlRetryCallProperties; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionState; 
+import org.apache.hadoop.hive.metastore.txn.entities.LockInfo; +import org.apache.hadoop.hive.metastore.txn.entities.MetricsInfo; +import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus; +import org.apache.hadoop.hive.metastore.txn.jdbc.commands.*; +import org.apache.hadoop.hive.metastore.txn.jdbc.functions.*; +import org.apache.hadoop.hive.metastore.txn.jdbc.queries.*; import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource; -import org.apache.hadoop.hive.metastore.txn.retryhandling.SqlRetryFunction; -import org.apache.hadoop.hive.metastore.txn.retryhandling.SqlRetryHandler; -import org.apache.hadoop.hive.metastore.utils.FileUtils; +import org.apache.hadoop.hive.metastore.txn.jdbc.NoPoolConnectionPool; +import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedCommand; +import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryCallProperties; +import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryException; +import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryHandler; +import org.apache.hadoop.hive.metastore.txn.service.AcidHouseKeeperService; import org.apache.hadoop.hive.metastore.utils.JavaUtils; -import org.apache.hadoop.hive.metastore.utils.LockTypeUtil; import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils; -import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; -import org.apache.hadoop.hive.metastore.utils.StringableMap; import org.apache.hadoop.util.StringUtils; +import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; - -import com.google.common.base.Splitter; - -import static org.apache.commons.lang3.StringUtils.isNotBlank; -import static org.apache.commons.lang3.StringUtils.isBlank; -import static org.apache.commons.lang3.StringUtils.repeat; -import static org.apache.commons.lang3.StringUtils.EMPTY; -import static org.apache.commons.lang3.StringUtils.wrap; -import static org.apache.hadoop.hive.metastore.txn.TxnUtils.getEpochFn; -import static org.apache.hadoop.hive.metastore.txn.TxnUtils.executeQueriesInBatchNoCount; -import static org.apache.hadoop.hive.metastore.txn.TxnUtils.executeQueriesInBatch; -import static org.apache.hadoop.hive.metastore.txn.TxnUtils.getFullTableName; -import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; -import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier; -import static org.springframework.transaction.TransactionDefinition.PROPAGATION_REQUIRED; - -import com.google.common.annotations.VisibleForTesting; import org.springframework.dao.DataAccessException; import org.springframework.jdbc.UncategorizedSQLException; import org.springframework.jdbc.core.namedparam.MapSqlParameterSource; +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Timestamp; +import java.text.MessageFormat; +import java.time.Instant; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.BiPredicate; + +import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog; + /** * A handler to answer transaction related calls that come into the metastore * server. - * + *

* Note on log messages: Please include txnid:X and lockid info using * {@link JavaUtils#txnIdToString(long)} * and {@link JavaUtils#lockIdToString(long)} in all messages. * The txnid:X and lockid:Y matches how Thrift object toString() methods are generated, * so keeping the format consistent makes grep'ing the logs much easier. - * + *

* Note on HIVE_LOCKS.hl_last_heartbeat. * For locks that are part of transaction, we set this 0 (would rather set it to NULL but * Currently the DB schema has this NOT NULL) and only update/read heartbeat from corresponding * transaction in TXNS. - * + *

* In general there can be multiple metastores where this logic can execute, thus the DB is * used to ensure proper mutexing of operations. * Select ... For Update (or equivalent: either MsSql with(updlock) or actual Update stmt) is @@ -221,22 +158,21 @@ * This allows almost all operations to run at READ_COMMITTED and minimizes DB deadlocks. * 3. checkLock() - this is mutexted entirely since we must ensure that while we check if some lock * can be granted, no other (strictly speaking "earlier") lock can change state. - * - * The exception to his is Derby which doesn't support proper S4U. Derby is always running embedded + *

+ * The exception to this is Derby which doesn't support proper S4U. Derby is always running embedded * (this is the only supported configuration for Derby) * in the same JVM as HiveMetaStoreHandler thus we use JVM wide lock to properly sequnce the operations. - * - * {@link #derbyLock} + *

* If we ever decide to run remote Derby server, according to * https://db.apache.org/derby/docs/10.0/manuals/develop/develop78.html all transactions will be * seriazlied, so that would also work though has not been tested. - * + *

* General design note: * It's imperative that any operation on a txn (e.g. commit), ensure (atomically) that this txn is * still valid and active. In the code this is usually achieved at the same time the txn record * is locked for some operation. - * + *

* Note on retry logic: * Metastore has retry logic in both {@link org.apache.hadoop.hive.metastore.RetryingMetaStoreClient} * and {@link org.apache.hadoop.hive.metastore.RetryingHMSHandler}. The retry logic there is very @@ -248,136 +184,82 @@ * the metstore call stack should have logic not to retry. There are {@link RetrySemantics} * annotations to document the behavior. */ +@SuppressWarnings("SqlSourceToSinkFlow") @InterfaceAudience.Private @InterfaceStability.Evolving -abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI { - - private static final String TXN_TMP_STATE = "_"; - private static final String DEFAULT_POOL_NAME = "default"; +public abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI { - // Lock states - static final protected char LOCK_ACQUIRED = 'a'; - static final protected char LOCK_WAITING = 'w'; + + public final static class ConfVars { + + private ConfVars() {} - private static final int ALLOWED_REPEATED_DEADLOCKS = 10; - private static final Logger LOG = LoggerFactory.getLogger(TxnHandler.class.getName()); + // Whether to use min_history_level table or not. + // At startup we read it from the config, but set it to false if min_history_level does nto exists. + private boolean useMinHistoryLevel; + private boolean useMinHistoryWriteId; - private static DataSource connPool; - private static DataSource connPoolMutex; + public boolean useMinHistoryLevel() { + return useMinHistoryLevel; + } - private static final String MANUAL_RETRY = "ManualRetry"; + public void setUseMinHistoryLevel(boolean useMinHistoryLevel) { + this.useMinHistoryLevel = useMinHistoryLevel; + } - // Query definitions - private static final String HIVE_LOCKS_INSERT_QRY = "INSERT INTO \"HIVE_LOCKS\" ( " + - "\"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\", \"HL_TXNID\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", " + - "\"HL_LOCK_STATE\", \"HL_LOCK_TYPE\", \"HL_LAST_HEARTBEAT\", \"HL_USER\", \"HL_HOST\", \"HL_AGENT_INFO\") " + - "VALUES (?, ?, ?, ?, ?, ?, ?, ?, %s, ?, ?, ?)"; - private static final String TXN_COMPONENTS_INSERT_QUERY = "INSERT INTO \"TXN_COMPONENTS\" (" + - "\"TC_TXNID\", \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", \"TC_OPERATION_TYPE\", \"TC_WRITEID\")" + - " VALUES (?, ?, ?, ?, ?, ?)"; - private static final String TXN_COMPONENTS_DP_DELETE_QUERY = "DELETE FROM \"TXN_COMPONENTS\" " + - "WHERE \"TC_TXNID\" = ? AND \"TC_DATABASE\" = ? AND \"TC_TABLE\" = ? AND \"TC_PARTITION\" IS NULL"; - private static final String INCREMENT_NEXT_LOCK_ID_QUERY = "UPDATE \"NEXT_LOCK_ID\" SET \"NL_NEXT\" = %s"; - private static final String UPDATE_HIVE_LOCKS_EXT_ID_QUERY = "UPDATE \"HIVE_LOCKS\" SET \"HL_LOCK_EXT_ID\" = %s " + - "WHERE \"HL_LOCK_EXT_ID\" = %s"; - private static final String SELECT_WRITE_ID_QUERY = "SELECT \"T2W_WRITEID\" FROM \"TXN_TO_WRITE_ID\" WHERE" + - " \"T2W_DATABASE\" = ? AND \"T2W_TABLE\" = ? 
AND \"T2W_TXNID\" = ?"; - private static final String COMPL_TXN_COMPONENTS_INSERT_QUERY = "INSERT INTO \"COMPLETED_TXN_COMPONENTS\" " + - "(\"CTC_TXNID\"," + " \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\", \"CTC_WRITEID\", \"CTC_UPDATE_DELETE\")" + - " VALUES (%s, ?, ?, ?, ?, %s)"; - private static final String TXNS_INSERT_QRY = "INSERT INTO \"TXNS\" " + - "(\"TXN_STATE\", \"TXN_STARTED\", \"TXN_LAST_HEARTBEAT\", \"TXN_USER\", \"TXN_HOST\", \"TXN_TYPE\") " + - "VALUES(?,%s,%s,?,?,?)"; - private static final String SELECT_LOCKS_FOR_LOCK_ID_QUERY = "SELECT \"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\", " + - "\"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", \"HL_LOCK_STATE\", \"HL_LOCK_TYPE\", \"HL_TXNID\" " + - "FROM \"HIVE_LOCKS\" WHERE \"HL_LOCK_EXT_ID\" = ?"; - private static final String SELECT_TIMED_OUT_LOCKS_QUERY = "SELECT DISTINCT \"HL_LOCK_EXT_ID\" FROM \"HIVE_LOCKS\" " + - "WHERE \"HL_LAST_HEARTBEAT\" < %s - :timeout AND \"HL_TXNID\" = 0"; - private static final String TXN_TO_WRITE_ID_INSERT_QUERY = "INSERT INTO \"TXN_TO_WRITE_ID\" (\"T2W_TXNID\", " + - "\"T2W_DATABASE\", \"T2W_TABLE\", \"T2W_WRITEID\") VALUES (?, ?, ?, ?)"; - private static final String MIN_HISTORY_WRITE_ID_INSERT_QUERY = "INSERT INTO \"MIN_HISTORY_WRITE_ID\" (\"MH_TXNID\", " + - "\"MH_DATABASE\", \"MH_TABLE\", \"MH_WRITEID\") VALUES (?, ?, ?, ?)"; - private static final String SELECT_NWI_NEXT_FROM_NEXT_WRITE_ID = - "SELECT \"NWI_NEXT\" FROM \"NEXT_WRITE_ID\" WHERE \"NWI_DATABASE\" = ? AND \"NWI_TABLE\" = ?"; - private static final String SELECT_METRICS_INFO_QUERY = - "SELECT * FROM (SELECT COUNT(*) FROM \"TXN_TO_WRITE_ID\") \"TTWID\" CROSS JOIN (" + - "SELECT COUNT(*) FROM \"COMPLETED_TXN_COMPONENTS\") \"CTC\" CROSS JOIN (" + - "SELECT COUNT(*), MIN(\"TXN_ID\"), ({0} - MIN(\"TXN_STARTED\"))/1000 FROM \"TXNS\" WHERE \"TXN_STATE\"='" + - TxnStatus.OPEN + "' AND \"TXN_TYPE\" = "+ TxnType.REPL_CREATED.getValue() +") \"TR\" CROSS JOIN (" + - "SELECT COUNT(*), MIN(\"TXN_ID\"), ({0} - MIN(\"TXN_STARTED\"))/1000 FROM \"TXNS\" WHERE \"TXN_STATE\"='" + - TxnStatus.OPEN + "' AND \"TXN_TYPE\" != "+ TxnType.REPL_CREATED.getValue() +") \"T\" CROSS JOIN (" + - "SELECT COUNT(*), MIN(\"TXN_ID\"), ({0} - MIN(\"TXN_STARTED\"))/1000 FROM \"TXNS\" WHERE \"TXN_STATE\"='" + - TxnStatus.ABORTED + "') \"A\" CROSS JOIN (" + - "SELECT COUNT(*), ({0} - MIN(\"HL_ACQUIRED_AT\"))/1000 FROM \"HIVE_LOCKS\") \"HL\" CROSS JOIN (" + - "SELECT ({0} - MIN(\"CQ_COMMIT_TIME\"))/1000 from \"COMPACTION_QUEUE\" WHERE " + - "\"CQ_STATE\"=''" + READY_FOR_CLEANING + "'') OLDEST_CLEAN"; - private static final String SELECT_TABLES_WITH_X_ABORTED_TXNS = - "SELECT \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\" FROM \"TXN_COMPONENTS\" " + - "INNER JOIN \"TXNS\" ON \"TC_TXNID\" = \"TXN_ID\" WHERE \"TXN_STATE\" = " + TxnStatus.ABORTED + - " GROUP BY \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\" HAVING COUNT(\"TXN_ID\") > ?"; - - private static final String EXCL_CTAS_ERR_MSG = - "Failed to initiate a concurrent CTAS operation with the same table name, lockInfo : %s"; - private static final String ZERO_WAIT_READ_ERR_MSG = "Unable to acquire read lock due to an existing exclusive lock {%s}"; + public boolean useMinHistoryWriteId() { + return useMinHistoryWriteId; + } + public void setUseMinHistoryWriteId(boolean useMinHistoryWriteId) { + this.useMinHistoryWriteId = useMinHistoryWriteId; + } - protected List transactionalListeners; + public void init(BiPredicate tableCheck, Configuration conf){ + useMinHistoryWriteId = tableCheck.test("MIN_HISTORY_WRITE_ID", + 
MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.TXN_USE_MIN_HISTORY_WRITE_ID)); + useMinHistoryLevel = tableCheck.test("MIN_HISTORY_LEVEL", + MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.TXN_USE_MIN_HISTORY_LEVEL)); + + } + } + + private static final Logger LOG = LoggerFactory.getLogger(TxnHandler.class.getName()); + public static final TxnHandler.ConfVars ConfVars = new ConfVars(); // Maximum number of open transactions that's allowed private static volatile int maxOpenTxns = 0; // Whether number of open transactions reaches the threshold private static volatile boolean tooManyOpenTxns = false; + // Current number of open txns + private static AtomicInteger numOpenTxns; + + private static volatile boolean initialized = false; + private static DataSource connPool; + private static DataSource connPoolMutex; + protected static DataSource connPoolCompactor; + + protected static DatabaseProduct dbProduct; + protected static SQLGenerator sqlGenerator; + protected static long openTxnTimeOutMillis; /** * Number of consecutive deadlocks we have seen */ - private int deadlockCnt; - private long deadlockRetryInterval; protected Configuration conf; - protected static DatabaseProduct dbProduct; - protected static SQLGenerator sqlGenerator; - private static long openTxnTimeOutMillis; + protected List transactionalListeners; // (End user) Transaction timeout, in milliseconds. private long timeout; private long replicationTxnTimeout; - private int maxBatchSize; - private String identifierQuoteString; // quotes to use for quoting tables, where necessary - private long retryInterval; - private int retryLimit; - private int retryNum; - // Current number of open txns - private AtomicInteger numOpenTxns; - // Whether to use min_history_level table or not. - // At startup we read it from the config, but set it to false if min_history_level does nto exists. - static boolean useMinHistoryLevel; - static boolean useMinHistoryWriteId; - - private static SqlRetryHandler sqlRetryHandler; - protected static MultiDataSourceJdbcResource jdbcResource; + private MutexAPI mutexAPI; + private TxnLockManager txnLockManager; + private SqlRetryHandler sqlRetryHandler; + protected MultiDataSourceJdbcResource jdbcResource; - /** - * Derby specific concurrency control - */ - private static final ReentrantLock derbyLock = new ReentrantLock(true); - /** - * must be static since even in UT there may be > 1 instance of TxnHandler - * (e.g. via Compactor services) - */ - private final static ConcurrentHashMap derbyKey2Lock = new ConcurrentHashMap<>(); private static final String hostname = JavaUtils.hostname(); - // Private methods should never catch SQLException and then throw MetaException. The public - // methods depend on SQLException coming back so they can detect and handle deadlocks. Private - // methods should only throw MetaException when they explicitly know there's a logic error and - // they want to throw past the public methods. - // - // All public methods that write to the database have to check for deadlocks when a SQLException - // comes back and handle it if they see one. This has to be done with the connection pooling - // in mind. To do this they should call checkRetryable() AFTER rolling back the db transaction, - // and then they should catch RetryException and call themselves recursively. See commitTxn for an example. - public TxnHandler() { } @@ -385,123 +267,86 @@ public TxnHandler() { * This is logically part of c'tor and must be called prior to any other method. 
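// Editor's sketch (ConfVarsUsageSketch and its method names are hypothetical) of how the new static ConfVars
// holder is primed once with a table-existence check and then consulted from anywhere in the handler, in the
// same way CompactionTxnHandler.findMinOpenTxnIdForCleaner() earlier in this diff short-circuits when
// min-history write ids are in use.
import java.util.function.BiPredicate;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.txn.TxnHandler;

final class ConfVarsUsageSketch {
  private ConfVarsUsageSketch() {}

  static void prime(Configuration conf, BiPredicate<String, Boolean> tableIsUsable) {
    // tableIsUsable receives ("MIN_HISTORY_WRITE_ID", configuredValue) etc. and can turn a flag off
    // when the backing table no longer exists in the metastore RDBMS
    TxnHandler.ConfVars.init(tableIsUsable, conf);
  }

  static long minOpenTxnIdForCleaner(long computedWaterMark) {
    // mirrors findMinOpenTxnIdForCleaner(): with min-history write ids enabled the cleaner needs no water mark
    return TxnHandler.ConfVars.useMinHistoryWriteId() ? Long.MAX_VALUE : computedWaterMark;
  }
}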
* Not physically part of c'tor due to use of reflection */ - public void setConf(Configuration conf){ + @Override + public void setConf(Configuration conf) { this.conf = conf; - int maxPoolSize = MetastoreConf.getIntVar(conf, ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS); - synchronized (TxnHandler.class) { - try (DataSourceProvider.DataSourceNameConfigurator configurator = - new DataSourceProvider.DataSourceNameConfigurator(conf, POOL_TX)) { - if (connPool == null) { - connPool = setupJdbcConnectionPool(conf, maxPoolSize); - } - if (connPoolMutex == null) { - configurator.resetName(POOL_MUTEX); - connPoolMutex = setupJdbcConnectionPool(conf, maxPoolSize); - } - } - if (dbProduct == null) { - try (Connection dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED)) { - determineDatabaseProduct(dbConn); - } catch (SQLException e) { - LOG.error("Unable to determine database product", e); - throw new RuntimeException(e); - } - } + if (!initialized) { + synchronized (TxnHandler.class) { + if (!initialized) { + try (DataSourceProvider.DataSourceNameConfigurator configurator = + new DataSourceProvider.DataSourceNameConfigurator(conf, POOL_TX)) { + int maxPoolSize = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS); + if (connPool == null) { + connPool = setupJdbcConnectionPool(conf, maxPoolSize); + } + if (connPoolMutex == null) { + configurator.resetName(POOL_MUTEX); + connPoolMutex = setupJdbcConnectionPool(conf, maxPoolSize); + } + if (connPoolCompactor == null) { + configurator.resetName(POOL_COMPACTOR); + connPoolCompactor = setupJdbcConnectionPool(conf, + MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.HIVE_COMPACTOR_CONNECTION_POOLING_MAX_CONNECTIONS)); + } + } + if (dbProduct == null) { + try (Connection dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED, connPool)) { + determineDatabaseProduct(dbConn); + } catch (SQLException e) { + LOG.error("Unable to determine database product", e); + throw new RuntimeException(e); + } + } + if (sqlGenerator == null) { + sqlGenerator = new SQLGenerator(dbProduct, conf); + } + + initJdbcResource(); - if (sqlGenerator == null) { - sqlGenerator = new SQLGenerator(dbProduct, conf); + try { + TxnHandler.ConfVars.init(this::checkIfTableIsUsable, conf); + } catch (Exception e) { + String msg = "Error during TxnHandler initialization, " + e.getMessage(); + LOG.error(msg); + throw e; + } + initialized = true; + } } - - if (jdbcResource == null) { - jdbcResource = new MultiDataSourceJdbcResource(dbProduct); - jdbcResource.registerDataSource(POOL_TX, connPool); - jdbcResource.registerDataSource(POOL_MUTEX, connPoolMutex); - } } - numOpenTxns = Metrics.getOrCreateGauge(MetricsConstants.NUM_OPEN_TXNS); + initJdbcResource(); - timeout = MetastoreConf.getTimeVar(conf, ConfVars.TXN_TIMEOUT, TimeUnit.MILLISECONDS); - replicationTxnTimeout = MetastoreConf.getTimeVar(conf, ConfVars.REPL_TXN_TIMEOUT, TimeUnit.MILLISECONDS); - retryInterval = MetastoreConf.getTimeVar(conf, ConfVars.HMS_HANDLER_INTERVAL, - TimeUnit.MILLISECONDS); - retryLimit = MetastoreConf.getIntVar(conf, ConfVars.HMS_HANDLER_ATTEMPTS); - deadlockRetryInterval = retryInterval / 10; - maxOpenTxns = MetastoreConf.getIntVar(conf, ConfVars.MAX_OPEN_TXNS); - maxBatchSize = MetastoreConf.getIntVar(conf, ConfVars.JDBC_MAX_BATCH_SIZE); + mutexAPI = new TxnStoreMutex(sqlGenerator, jdbcResource); - openTxnTimeOutMillis = MetastoreConf.getTimeVar(conf, ConfVars.TXN_OPENTXN_TIMEOUT, TimeUnit.MILLISECONDS); - - try { - useMinHistoryWriteId = 
checkIfTableIsUsable("MIN_HISTORY_WRITE_ID", - MetastoreConf.getBoolVar(conf, ConfVars.TXN_USE_MIN_HISTORY_WRITE_ID)); - - // override the config if table does not exists anymore - // this helps to roll out his feature when multiple HMS is accessing the same backend DB - useMinHistoryLevel = checkIfTableIsUsable("MIN_HISTORY_LEVEL", - MetastoreConf.getBoolVar(conf, ConfVars.TXN_USE_MIN_HISTORY_LEVEL)); - } catch (MetaException e) { - String msg = "Error during TxnHandler startup, " + e.getMessage(); - LOG.error(msg); - throw new RuntimeException(e); - } + numOpenTxns = Metrics.getOrCreateGauge(MetricsConstants.NUM_OPEN_TXNS); + + timeout = MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.TXN_TIMEOUT, TimeUnit.MILLISECONDS); + replicationTxnTimeout = MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.REPL_TXN_TIMEOUT, TimeUnit.MILLISECONDS); + maxOpenTxns = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.MAX_OPEN_TXNS); + openTxnTimeOutMillis = MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.TXN_OPENTXN_TIMEOUT, TimeUnit.MILLISECONDS); try { transactionalListeners = MetaStoreServerUtils.getMetaStoreListeners( - TransactionalMetaStoreEventListener.class, - conf, MetastoreConf.getVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS)); - } catch(MetaException e) { + TransactionalMetaStoreEventListener.class, + conf, MetastoreConf.getVar(conf, MetastoreConf.ConfVars.TRANSACTIONAL_EVENT_LISTENERS)); + } catch (MetaException e) { String msg = "Unable to get transaction listeners, " + e.getMessage(); LOG.error(msg); throw new RuntimeException(e); } - sqlRetryHandler = new SqlRetryHandler(conf, jdbcResource.getDatabaseProduct()); - } - - /** - * Check if provided table is usable - * @return - * @throws MetaException - */ - private boolean checkIfTableIsUsable(String tableName, boolean configValue) throws MetaException { - if (!configValue) { - // don't check it if disabled - return false; - } - Connection dbConn = null; - boolean tableExists = true; - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - try (Statement stmt = dbConn.createStatement()) { - // Dummy query to see if table exists - try (ResultSet rs = stmt.executeQuery("SELECT 1 FROM \"" + tableName + "\"")) { - rs.next(); - } - } - } catch (SQLException e) { - LOG.debug("Catching sql exception in " + tableName + " check", e); - if (dbProduct.isTableNotExistsError(e)) { - tableExists = false; - } else { - throw new MetaException( - "Unable to select from transaction database: " + getMessage(e) + StringUtils.stringifyException(e)); - } - } finally { - closeDbConn(dbConn); - } - return tableExists; + sqlRetryHandler = new SqlRetryHandler(conf, jdbcResource.getDatabaseProduct()); + txnLockManager = TransactionalRetryProxy.getProxy(sqlRetryHandler, jdbcResource, new DefaultTxnLockManager(jdbcResource)); } - + @Override - @RetrySemantics.ReadOnly public SqlRetryHandler getRetryHandler() { return sqlRetryHandler; } @Override - @RetrySemantics.ReadOnly public MultiDataSourceJdbcResource getJdbcResourceHolder() { return jdbcResource; } @@ -512,114 +357,21 @@ public Configuration getConf() { } @Override - @RetrySemantics.ReadOnly public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException { - return getOpenTxnsList(true).toOpenTxnsInfoResponse(); + return jdbcResource.execute(new GetOpenTxnsListHandler(true, openTxnTimeOutMillis)) + .toOpenTxnsInfoResponse(); } @Override - @RetrySemantics.ReadOnly public GetOpenTxnsResponse getOpenTxns() throws MetaException { - return 
getOpenTxnsList(false).toOpenTxnsResponse(Arrays.asList(TxnType.READ_ONLY)); - } - - private GetOpenTxnsResponse getOpenTxns(Connection dbConn) throws MetaException { - return getOpenTxnsList(false, dbConn).toOpenTxnsResponse(Arrays.asList(TxnType.READ_ONLY)); + return jdbcResource.execute(new GetOpenTxnsListHandler(false, openTxnTimeOutMillis)) + .toOpenTxnsResponse(Collections.singletonList(TxnType.READ_ONLY)); } @Override - @RetrySemantics.ReadOnly public GetOpenTxnsResponse getOpenTxns(List excludeTxnTypes) throws MetaException { - return getOpenTxnsList(false).toOpenTxnsResponse(excludeTxnTypes); - } - - private OpenTxnList getOpenTxnsList(boolean infoFileds) throws MetaException { - Connection dbConn = null; - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - return getOpenTxnsList(infoFileds, dbConn); - } catch (SQLException e) { - throw new MetaException( - "Unable to get a connection: " + getMessage(e) + StringUtils.stringifyException(e)); - } finally { - closeDbConn(dbConn); - } - } - - private OpenTxnList getOpenTxnsList(boolean infoFields, Connection dbConn) throws MetaException { - try { - // We need to figure out the HighWaterMark and the list of open transactions. - Statement stmt = null; - ResultSet rs = null; - try { - /* - * This method need guarantees from - * {@link #openTxns(OpenTxnRequest)} and {@link #commitTxn(CommitTxnRequest)}. - * It will look at the TXNS table and find each transaction between the max(txn_id) as HighWaterMark - * and the max(txn_id) before the TXN_OPENTXN_TIMEOUT period as LowWaterMark. - * Every transaction that is not found between these will be considered as open, since it may appear later. - * openTxns must ensure, that no new transaction will be opened with txn_id below LWM and - * commitTxn must ensure, that no committed transaction will be removed before the time period expires. - */ - stmt = dbConn.createStatement(); - List txnInfos = new ArrayList<>(); - String txnsQuery = String.format(infoFields ? OpenTxn.OPEN_TXNS_INFO_QUERY : OpenTxn.OPEN_TXNS_QUERY, - getEpochFn(dbProduct)); - LOG.debug("Going to execute query<{}>", txnsQuery); - rs = stmt.executeQuery(txnsQuery); - /* - * We can use the maximum txn_id from the TXNS table as high water mark, since the commitTxn and the Initiator - * guarantees, that the transaction with the highest txn_id will never be removed from the TXNS table. - * If there is a pending openTxns, that is already acquired it's sequenceId but not yet committed the insert - * into the TXNS table, will have either a lower txn_id than HWM and will be listed in the openTxn list, - * or will have a higher txn_id and don't effect this getOpenTxns() call. 
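// Editor's sketch of the snapshot rule spelled out in the comment above and now implemented by
// GetOpenTxnsListHandler (OpenTxnGapSketch and TxnRow are hypothetical names): the highest TXN_ID seen is the
// high water mark, and any gap in the id sequence next to a row younger than TXN_OPENTXN_TIMEOUT is treated
// as a possibly open transaction, because a concurrent openTxns() may have taken that sequence value without
// having inserted its TXNS row yet.
import java.util.ArrayList;
import java.util.List;

final class OpenTxnGapSketch {

  static final class TxnRow {            // simplified projection of one TXNS row
    final long txnId;
    final long ageMillis;
    final boolean committed;

    TxnRow(long txnId, long ageMillis, boolean committed) {
      this.txnId = txnId;
      this.ageMillis = ageMillis;
      this.committed = committed;
    }
  }

  static List<Long> possiblyOpenTxnIds(List<TxnRow> rowsOrderedByTxnId, long openTxnTimeoutMillis) {
    List<Long> open = new ArrayList<>();
    long lowBoundary = 0;
    for (TxnRow row : rowsOrderedByTxnId) {
      if (row.ageMillis < openTxnTimeoutMillis) {
        // every missing id between the previous row and this recent one may still surface as open
        for (long gapId = lowBoundary + 1; gapId < row.txnId; gapId++) {
          open.add(gapId);
        }
      }
      lowBoundary = row.txnId;            // the last row seen is also the high water mark
      if (!row.committed) {
        open.add(row.txnId);              // committed rows only serve as gap boundaries
      }
    }
    return open;
  }
}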
- */ - long hwm = 0; - long openTxnLowBoundary = 0; - - while (rs.next()) { - long txnId = rs.getLong(1); - long age = rs.getLong(4); - hwm = txnId; - if (age < getOpenTxnTimeOutMillis()) { - // We will consider every gap as an open transaction from the previous txnId - openTxnLowBoundary++; - while (txnId > openTxnLowBoundary) { - // Add an empty open transaction for every missing value - txnInfos.add(new OpenTxn(openTxnLowBoundary, TxnStatus.OPEN, TxnType.DEFAULT)); - LOG.debug("Open transaction added for missing value in TXNS {}", - JavaUtils.txnIdToString(openTxnLowBoundary)); - openTxnLowBoundary++; - } - } else { - openTxnLowBoundary = txnId; - } - TxnStatus state = TxnStatus.fromString(rs.getString(2)); - if (state == TxnStatus.COMMITTED) { - // This is only here, to avoid adding this txnId as possible gap - continue; - } - OpenTxn txnInfo = new OpenTxn(txnId, state, TxnType.findByValue(rs.getInt(3))); - if (infoFields) { - txnInfo.setUser(rs.getString(5)); - txnInfo.setHost(rs.getString(6)); - txnInfo.setStartedTime(rs.getLong(7)); - txnInfo.setLastHeartBeatTime(rs.getLong(8)); - } - txnInfos.add(txnInfo); - } - LOG.debug("Got OpenTxnList with hwm: {} and openTxnList size {}.", hwm, txnInfos.size()); - return new OpenTxnList(hwm, txnInfos); - } catch (SQLException e) { - checkRetryable(e, "getOpenTxnsList"); - throw new MetaException( - "Unable to select from transaction database: " + getMessage(e) + StringUtils.stringifyException(e)); - } finally { - close(rs, stmt, null); - } - } catch (RetryException e) { - return getOpenTxnsList(infoFields, dbConn); - } + return jdbcResource.execute(new GetOpenTxnsListHandler(false, openTxnTimeOutMillis)) + .toOpenTxnsResponse(excludeTxnTypes); } /** @@ -627,7 +379,6 @@ private OpenTxnList getOpenTxnsList(boolean infoFields, Connection dbConn) throw * Worst case, it will leave an open txn which will timeout. */ @Override - @RetrySemantics.Idempotent public OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException { if (!tooManyOpenTxns && numOpenTxns.get() >= maxOpenTxns) { tooManyOpenTxns = true; @@ -648,260 +399,75 @@ public OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException { throw new MetaException("Invalid input for number of txns: " + numTxns); } - try { - Connection dbConn = null; - Statement stmt = null; - try { - /* - * To make {@link #getOpenTxns()}/{@link #getOpenTxnsInfo()} work correctly, this operation must ensure - * that looking at the TXNS table every open transaction could be identified below a given High Water Mark. - * One way to do it, would be to serialize the openTxns call with a S4U lock, but that would cause - * performance degradation with high transaction load. - * To enable parallel openTxn calls, we define a time period (TXN_OPENTXN_TIMEOUT) and consider every - * transaction missing from the TXNS table in that period open, and prevent opening transaction outside - * the period. - * Example: At t[0] there is one open transaction in the TXNS table, T[1]. - * T[2] acquires the next sequence at t[1] but only commits into the TXNS table at t[10]. - * T[3] acquires its sequence at t[2], and commits into the TXNS table at t[3]. - * Then T[3] calculates it’s snapshot at t[4] and puts T[1] and also T[2] in the snapshot’s - * open transaction list. T[1] because it is presented as open in TXNS, - * T[2] because it is a missing sequence. - * - * In the current design, there can be several metastore instances running in a given Warehouse. 
- * This makes ideas like reserving a range of IDs to save trips to DB impossible. For example, - * a client may go to MS1 and start a transaction with ID 500 to update a particular row. - * Now the same client will start another transaction, except it ends up on MS2 and may get - * transaction ID 400 and update the same row. Now the merge that happens to materialize the snapshot - * on read will thing the version of the row from transaction ID 500 is the latest one. - * - * Longer term we can consider running Active-Passive MS (at least wrt to ACID operations). This - * set could support a write-through cache for added performance. - */ - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - stmt = dbConn.createStatement(); - /* - * The openTxn and commitTxn must be mutexed, when committing a not read only transaction. - * This is achieved by requesting a shared table lock here, and an exclusive one at commit. - * Since table locks are working in Derby, we don't need the lockInternal call here. - * Example: Suppose we have two transactions with update like x = x+1. - * We have T[3,3] that was using a value from a snapshot with T[2,2]. If we allow committing T[3,3] - * and opening T[4] parallel it is possible, that T[4] will be using the value from a snapshot with T[2,2], - * and we will have a lost update problem - */ - acquireTxnLock(stmt, true); - // Measure the time from acquiring the sequence value, till committing in the TXNS table - StopWatch generateTransactionWatch = new StopWatch(); - generateTransactionWatch.start(); - - List txnIds = openTxns(dbConn, rqst); + /* + * To make {@link #getOpenTxns()}/{@link #getOpenTxnsInfo()} work correctly, this operation must ensure + * that looking at the TXNS table every open transaction could be identified below a given High Water Mark. + * One way to do it, would be to serialize the openTxns call with a S4U lock, but that would cause + * performance degradation with high transaction load. + * To enable parallel openTxn calls, we define a time period (TXN_OPENTXN_TIMEOUT) and consider every + * transaction missing from the TXNS table in that period open, and prevent opening transaction outside + * the period. + * Example: At t[0] there is one open transaction in the TXNS table, T[1]. + * T[2] acquires the next sequence at t[1] but only commits into the TXNS table at t[10]. + * T[3] acquires its sequence at t[2], and commits into the TXNS table at t[3]. + * Then T[3] calculates it’s snapshot at t[4] and puts T[1] and also T[2] in the snapshot’s + * open transaction list. T[1] because it is presented as open in TXNS, + * T[2] because it is a missing sequence. + * + * In the current design, there can be several metastore instances running in a given Warehouse. + * This makes ideas like reserving a range of IDs to save trips to DB impossible. For example, + * a client may go to MS1 and start a transaction with ID 500 to update a particular row. + * Now the same client will start another transaction, except it ends up on MS2 and may get + * transaction ID 400 and update the same row. Now the merge that happens to materialize the snapshot + * on read will thing the version of the row from transaction ID 500 is the latest one. + * + * Longer term we can consider running Active-Passive MS (at least wrt to ACID operations). This + * set could support a write-through cache for added performance. + */ + /* + * The openTxn and commitTxn must be mutexed, when committing a not read only transaction. 
+ * This is achieved by requesting a shared table lock here, and an exclusive one at commit. + * Since table locks are working in Derby, we don't need the lockInternal call here. + * Example: Suppose we have two transactions with update like x = x+1. + * We have T[3,3] that was using a value from a snapshot with T[2,2]. If we allow committing T[3,3] + * and opening T[4] parallel it is possible, that T[4] will be using the value from a snapshot with T[2,2], + * and we will have a lost update problem + */ + acquireTxnLock(true); + // Measure the time from acquiring the sequence value, till committing in the TXNS table + StopWatch generateTransactionWatch = new StopWatch(); + generateTransactionWatch.start(); - LOG.debug("Going to commit"); - dbConn.commit(); - generateTransactionWatch.stop(); - long elapsedMillis = generateTransactionWatch.getTime(TimeUnit.MILLISECONDS); - TxnType txnType = rqst.isSetTxn_type() ? rqst.getTxn_type() : TxnType.DEFAULT; - if (txnType != TxnType.READ_ONLY && elapsedMillis >= openTxnTimeOutMillis) { - /* - * The commit was too slow, we can not allow this to continue (except if it is read only, - * since that can not cause dirty reads). - * When calculating the snapshot for a given transaction, we look back for possible open transactions - * (that are not yet committed in the TXNS table), for TXN_OPENTXN_TIMEOUT period. - * We can not allow a write transaction, that was slower than TXN_OPENTXN_TIMEOUT to continue, - * because there can be other transactions running, that didn't considered this transactionId open, - * this could cause dirty reads. - */ - LOG.error("OpenTxnTimeOut exceeded commit duration {}, deleting transactionIds: {}", elapsedMillis, txnIds); - deleteInvalidOpenTransactions(dbConn, txnIds); - dbConn.commit(); - /* - * We do not throw RetryException directly, to not circumvent the max retry limit - */ - throw new SQLException("OpenTxnTimeOut exceeded", MANUAL_RETRY); - } - return new OpenTxnsResponse(txnIds); - } catch (SQLException e) { - LOG.debug("Going to rollback: ", e); - rollbackDBConn(dbConn); - checkRetryable(e, "openTxns(" + rqst + ")"); - throw new MetaException("Unable to select from transaction database " + StringUtils.stringifyException(e)); - } finally { - close(null, stmt, dbConn); - } - } catch (RetryException e) { - return openTxns(rqst); - } - } + List txnIds = new OpenTxnsFunction(rqst, openTxnTimeOutMillis, transactionalListeners).execute(jdbcResource); - private List openTxns(Connection dbConn, OpenTxnRequest rqst) - throws SQLException, MetaException { - int numTxns = rqst.getNum_txns(); - // Make sure the user has not requested an insane amount of txns. - int maxTxns = MetastoreConf.getIntVar(conf, ConfVars.TXN_MAX_OPEN_BATCH); - if (numTxns > maxTxns) { - numTxns = maxTxns; - } - List insertPreparedStmts = null; + LOG.debug("Going to commit"); + jdbcResource.getTransactionManager().getActiveTransaction().createSavepoint(); + generateTransactionWatch.stop(); + long elapsedMillis = generateTransactionWatch.getTime(TimeUnit.MILLISECONDS); TxnType txnType = rqst.isSetTxn_type() ? 
rqst.getTxn_type() : TxnType.DEFAULT; - boolean isReplayedReplTxn = txnType == TxnType.REPL_CREATED; - boolean isHiveReplTxn = rqst.isSetReplPolicy() && txnType == TxnType.DEFAULT; - try { - if (isReplayedReplTxn) { - assert rqst.isSetReplPolicy(); - List targetTxnIdList = getTargetTxnIdList(rqst.getReplPolicy(), rqst.getReplSrcTxnIds(), dbConn); - - if (!targetTxnIdList.isEmpty()) { - if (targetTxnIdList.size() != rqst.getReplSrcTxnIds().size()) { - LOG.warn("target txn id number {} is not matching with source txn id number {}", - targetTxnIdList, rqst.getReplSrcTxnIds()); - } - LOG.info("Target transactions {} are present for repl policy : {} and Source transaction id : {}", - targetTxnIdList.toString(), rqst.getReplPolicy(), rqst.getReplSrcTxnIds().toString()); - return targetTxnIdList; - } - } - - long minOpenTxnId = 0; - if (useMinHistoryLevel) { - minOpenTxnId = getMinOpenTxnIdWaterMark(dbConn); - } - - List txnIds = new ArrayList<>(numTxns); + if (txnType != TxnType.READ_ONLY && elapsedMillis >= openTxnTimeOutMillis) { /* - * The getGeneratedKeys are not supported in every dbms, after executing a multi line insert. - * But it is supported in every used dbms for single line insert, even if the metadata says otherwise. - * If the getGeneratedKeys are not supported first we insert a random batchId in the TXN_META_INFO field, - * then the keys are selected beck with that batchid. + * The commit was too slow, we can not allow this to continue (except if it is read only, + * since that can not cause dirty reads). + * When calculating the snapshot for a given transaction, we look back for possible open transactions + * (that are not yet committed in the TXNS table), for TXN_OPENTXN_TIMEOUT period. + * We can not allow a write transaction, that was slower than TXN_OPENTXN_TIMEOUT to continue, + * because there can be other transactions running, that didn't considered this transactionId open, + * this could cause dirty reads. */ - boolean genKeySupport = dbProduct.supportsGetGeneratedKeys(); - genKeySupport = genKeySupport || (numTxns == 1); - - String insertQuery = String.format(TXNS_INSERT_QRY, getEpochFn(dbProduct), getEpochFn(dbProduct)); - LOG.debug("Going to execute insert <{}>", insertQuery); - try (PreparedStatement ps = dbConn.prepareStatement(insertQuery, new String[] {"TXN_ID"})) { - String state = genKeySupport ? 
TxnStatus.OPEN.getSqlConst() : TXN_TMP_STATE; - if (numTxns == 1) { - ps.setString(1, state); - ps.setString(2, rqst.getUser()); - ps.setString(3, rqst.getHostname()); - ps.setInt(4, txnType.getValue()); - txnIds.addAll(executeTxnInsertBatchAndExtractGeneratedKeys(dbConn, genKeySupport, ps, false)); - } else { - for (int i = 0; i < numTxns; ++i) { - ps.setString(1, state); - ps.setString(2, rqst.getUser()); - ps.setString(3, rqst.getHostname()); - ps.setInt(4, txnType.getValue()); - ps.addBatch(); - - if ((i + 1) % maxBatchSize == 0) { - txnIds.addAll(executeTxnInsertBatchAndExtractGeneratedKeys(dbConn, genKeySupport, ps, true)); - } - } - if (numTxns % maxBatchSize != 0) { - txnIds.addAll(executeTxnInsertBatchAndExtractGeneratedKeys(dbConn, genKeySupport, ps, true)); - } - } - } - - assert txnIds.size() == numTxns; - - addTxnToMinHistoryLevel(dbConn, txnIds, minOpenTxnId); - - if (isReplayedReplTxn) { - List rowsRepl = new ArrayList<>(numTxns); - List params = Collections.singletonList(rqst.getReplPolicy()); - List> paramsList = new ArrayList<>(numTxns); - for (int i = 0; i < numTxns; i++) { - rowsRepl.add("?," + rqst.getReplSrcTxnIds().get(i) + "," + txnIds.get(i)); - paramsList.add(params); - } - - insertPreparedStmts = sqlGenerator.createInsertValuesPreparedStmt(dbConn, - "\"REPL_TXN_MAP\" (\"RTM_REPL_POLICY\", \"RTM_SRC_TXN_ID\", \"RTM_TARGET_TXN_ID\")", rowsRepl, - paramsList); - for (PreparedStatement pst : insertPreparedStmts) { - pst.execute(); - } - } + LOG.error("OpenTxnTimeOut exceeded commit duration {}, deleting transactionIds: {}", elapsedMillis, txnIds); - if (transactionalListeners != null && !isHiveReplTxn) { - MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners, - EventMessage.EventType.OPEN_TXN, new OpenTxnEvent(txnIds, txnType), dbConn, sqlGenerator); - } - return txnIds; - } finally { - if (insertPreparedStmts != null) { - for (PreparedStatement pst : insertPreparedStmts) { - pst.close(); - } - } - } - } - - private List executeTxnInsertBatchAndExtractGeneratedKeys(Connection dbConn, boolean genKeySupport, - PreparedStatement ps, boolean batch) throws SQLException { - List txnIds = new ArrayList<>(); - if (batch) { - ps.executeBatch(); - } else { - // For slight performance advantage we do not use the executeBatch, when we only have one row - ps.execute(); - } - if (genKeySupport) { - try (ResultSet generatedKeys = ps.getGeneratedKeys()) { - while (generatedKeys.next()) { - txnIds.add(generatedKeys.getLong(1)); - } - } - } else { - try (PreparedStatement pstmt = - dbConn.prepareStatement("SELECT \"TXN_ID\" FROM \"TXNS\" WHERE \"TXN_STATE\" = ?")) { - pstmt.setString(1, TXN_TMP_STATE); - try (ResultSet rs = pstmt.executeQuery()) { - while (rs.next()) { - txnIds.add(rs.getLong(1)); - } - } - } - try (PreparedStatement pstmt = dbConn - .prepareStatement("UPDATE \"TXNS\" SET \"TXN_STATE\" = ? 
WHERE \"TXN_STATE\" = ?")) { - pstmt.setString(1, TxnStatus.OPEN.getSqlConst()); - pstmt.setString(2, TXN_TMP_STATE); - pstmt.executeUpdate(); + if (!txnIds.isEmpty()) { + deleteInvalidOpenTransactions(txnIds); } - } - return txnIds; - } - private void deleteInvalidOpenTransactions(Connection dbConn, List txnIds) throws MetaException { - if (txnIds.size() == 0) { - return; + /* + * We cannot throw SQLException directly, as it is not in the throws clause + */ + throw new SqlRetryException("OpenTxnTimeOut exceeded"); } - try { - Statement stmt = null; - try { - stmt = dbConn.createStatement(); - List queries = new ArrayList<>(); - StringBuilder prefix = new StringBuilder(); - StringBuilder suffix = new StringBuilder(); - prefix.append("DELETE FROM \"TXNS\" WHERE "); - TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnIds, "\"TXN_ID\"", false, false); - executeQueriesInBatchNoCount(dbProduct, stmt, queries, maxBatchSize); - LOG.info("Removed transactions: ({}) from TXNS", txnIds); - - removeTxnsFromMinHistoryLevel(dbConn, txnIds); - } catch (SQLException e) { - LOG.debug("Going to rollback: ", e); - rollbackDBConn(dbConn); - checkRetryable(e, "deleteInvalidOpenTransactions(" + txnIds + ")"); - throw new MetaException("Unable to select from transaction database " + StringUtils.stringifyException(e)); - } finally { - closeStmt(stmt); - } - } catch (RetryException ex) { - deleteInvalidOpenTransactions(dbConn, txnIds); - } + return new OpenTxnsResponse(txnIds); } @Override @@ -914,518 +480,89 @@ public void setOpenTxnTimeOutMillis(long openTxnTimeOutMillis) { TxnHandler.openTxnTimeOutMillis = openTxnTimeOutMillis; } - protected long getOpenTxnTimeoutLowBoundaryTxnId(Connection dbConn) throws MetaException, SQLException { - long maxTxnId; - String s = - "SELECT MAX(\"TXN_ID\") FROM \"TXNS\" WHERE \"TXN_STARTED\" < (" + getEpochFn(dbProduct) + " - " - + openTxnTimeOutMillis + ")"; - try (Statement stmt = dbConn.createStatement()) { - LOG.debug("Going to execute query <{}>", s); - try (ResultSet maxTxnIdRs = stmt.executeQuery(s)) { - maxTxnIdRs.next(); - maxTxnId = maxTxnIdRs.getLong(1); - if (maxTxnIdRs.wasNull()) { - /* - * TXNS always contains at least one transaction, - * the row where txnid = (select max(txnid) where txn_started < epoch - TXN_OPENTXN_TIMEOUT) is never deleted - */ - throw new MetaException("Transaction tables not properly " + "initialized, null record found in MAX(TXN_ID)"); - } - } + @Override + public long getTargetTxnId(String replPolicy, long sourceTxnId) throws MetaException { + List targetTxnIds =jdbcResource.execute(new TargetTxnIdListHandler(replPolicy, Collections.singletonList(sourceTxnId))); + if (targetTxnIds.isEmpty()) { + LOG.info("Txn {} not present for repl policy {}", sourceTxnId, replPolicy); + return -1; } - return maxTxnId; + assert (targetTxnIds.size() == 1); + return targetTxnIds.get(0); } - private long getHighWaterMark(Statement stmt) throws SQLException, MetaException { - String s = "SELECT MAX(\"TXN_ID\") FROM \"TXNS\""; - LOG.debug("Going to execute query <{}>", s); - long maxOpenTxnId; - try (ResultSet maxOpenTxnIdRs = stmt.executeQuery(s)) { - maxOpenTxnIdRs.next(); - maxOpenTxnId = maxOpenTxnIdRs.getLong(1); - if (maxOpenTxnIdRs.wasNull()) { - /* - * TXNS always contains at least one transaction, - * the row where txnid = (select max(txnid) where txn_started < epoch - TXN_OPENTXN_TIMEOUT) is never deleted - */ - throw new MetaException("Transaction tables not properly " + "initialized, null record found in MAX(TXN_ID)"); + @Override 
+ public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException { + TxnType txnType = new AbortTxnFunction(rqst).execute(jdbcResource); + if (txnType != null) { + if (transactionalListeners != null && (!rqst.isSetReplPolicy() || !TxnType.DEFAULT.equals(rqst.getTxn_type()))) { + List dbsUpdated = getTxnDbsUpdated(rqst.getTxnid()); + MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners, EventMessage.EventType.ABORT_TXN, + new AbortTxnEvent(rqst.getTxnid(), txnType, null, dbsUpdated), jdbcResource.getConnection(), sqlGenerator); } } - return maxOpenTxnId; } - private List getTargetTxnIdList(String replPolicy, List sourceTxnIdList, Connection dbConn) - throws SQLException { - PreparedStatement pst = null; - ResultSet rs = null; + @Override + public void abortTxns(AbortTxnsRequest rqst) throws MetaException { + List txnIds = rqst.getTxn_ids(); + TxnErrorMsg txnErrorMsg = TxnErrorMsg.NONE; + if (rqst.isSetErrorCode()) { + txnErrorMsg = TxnErrorMsg.getTxnErrorMsg(rqst.getErrorCode()); + } + + List queries = new ArrayList<>(); + StringBuilder prefix = + new StringBuilder("SELECT \"TXN_ID\", \"TXN_TYPE\" from \"TXNS\" where \"TXN_STATE\" = ") + .append(TxnStatus.OPEN) + .append(" and \"TXN_TYPE\" != ").append(TxnType.READ_ONLY.getValue()).append(" and "); + + TxnUtils.buildQueryWithINClause(conf, queries, prefix, new StringBuilder(), + txnIds, "\"TXN_ID\"", false, false); + + Connection dbConn = jdbcResource.getConnection(); try { - List inQueries = new ArrayList<>(); - StringBuilder prefix = new StringBuilder(); - StringBuilder suffix = new StringBuilder(); - List targetTxnIdList = new ArrayList<>(); - prefix.append("SELECT \"RTM_TARGET_TXN_ID\" FROM \"REPL_TXN_MAP\" WHERE "); - suffix.append(" AND \"RTM_REPL_POLICY\" = ?"); - TxnUtils.buildQueryWithINClause(conf, inQueries, prefix, suffix, sourceTxnIdList, - "\"RTM_SRC_TXN_ID\"", false, false); - List params = Arrays.asList(replPolicy); - for (String query : inQueries) { - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute select <" + query.replace("?", "{}") + ">", quoteString(replPolicy)); + Map nonReadOnlyTxns = new HashMap<>(); + for (String query : queries) { + LOG.debug("Going to execute query <{}>", query); + try (Statement stmt = dbConn.createStatement(); ResultSet rs = stmt.executeQuery(sqlGenerator.addForUpdateClause(query))) { + while (rs.next()) { + TxnType txnType = TxnType.findByValue(rs.getInt(2)); + nonReadOnlyTxns.put(rs.getLong(1), txnType); + } } - pst = sqlGenerator.prepareStmtWithParameters(dbConn, query, params); - rs = pst.executeQuery(); - while (rs.next()) { - targetTxnIdList.add(rs.getLong(1)); + } + int numAborted = new AbortTxnsFunction(txnIds, false, false, false, txnErrorMsg).execute(jdbcResource); + if (numAborted != txnIds.size()) { + LOG.warn( + "Abort Transactions command only aborted {} out of {} transactions. 
It's possible that the other" + + " {} transactions have been aborted or committed, or the transaction ids are invalid.", + numAborted, txnIds.size(), (txnIds.size() - numAborted)); + } + + if (transactionalListeners != null) { + for (Long txnId : txnIds) { + List dbsUpdated = getTxnDbsUpdated(txnId); + MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners, + EventMessage.EventType.ABORT_TXN, new AbortTxnEvent(txnId, + nonReadOnlyTxns.getOrDefault(txnId, TxnType.READ_ONLY), null, dbsUpdated), dbConn, sqlGenerator); } - closeStmt(pst); } - LOG.debug("targetTxnid for srcTxnId " + sourceTxnIdList.toString() + " is " + targetTxnIdList.toString()); - return targetTxnIdList; - } finally { - closeStmt(pst); - close(rs); - } - } - - @Override - @RetrySemantics.Idempotent - public long getTargetTxnId(String replPolicy, long sourceTxnId) throws MetaException { - try { - Connection dbConn = null; - try { - lockInternal(); - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - List targetTxnIds = getTargetTxnIdList(replPolicy, Collections.singletonList(sourceTxnId), dbConn); - if (targetTxnIds.isEmpty()) { - LOG.info("Txn {} not present for repl policy {}", sourceTxnId, replPolicy); - return -1; - } - assert (targetTxnIds.size() == 1); - return targetTxnIds.get(0); - } catch (SQLException e) { - checkRetryable(e, "getTargetTxnId(" + replPolicy + sourceTxnId + ")"); - throw new MetaException("Unable to get target transaction id " - + StringUtils.stringifyException(e)); - } finally { - closeDbConn(dbConn); - unlockInternal(); - } - } catch (RetryException e) { - return getTargetTxnId(replPolicy, sourceTxnId); - } - } - - private Set getDbNamesForReplayedTxns(Connection dbConn, List targetTxnIds) throws SQLException { - Set dbNames = new HashSet<>(); - if (targetTxnIds.isEmpty()) { - return dbNames; - } - PreparedStatement pst = null; - ResultSet rs = null; - try { - List inQueries = new ArrayList<>(); - StringBuilder prefix = new StringBuilder(); - prefix.append("SELECT \"RTM_REPL_POLICY\" FROM \"REPL_TXN_MAP\" WHERE "); - TxnUtils.buildQueryWithINClause(conf, inQueries, prefix, new StringBuilder(), targetTxnIds, - "\"RTM_TARGET_TXN_ID\"", false, false); - for (String query : inQueries) { - LOG.debug("Going to execute select <{}>", query); - pst = sqlGenerator.prepareStmtWithParameters(dbConn, query, null); - rs = pst.executeQuery(); - while (rs.next()) { - dbNames.add(MetaStoreUtils.getDbNameFromReplPolicy(rs.getString(1))); - } - } - return dbNames; - } finally { - closeStmt(pst); - close(rs); - } - } - - private void deleteReplTxnMapEntry(Connection dbConn, long sourceTxnId, String replPolicy) throws SQLException { - String s = "DELETE FROM \"REPL_TXN_MAP\" WHERE \"RTM_SRC_TXN_ID\" = " + sourceTxnId + " AND \"RTM_REPL_POLICY\" = ?"; - try (PreparedStatement pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, Arrays.asList(replPolicy))) { - LOG.info("Going to execute <" + s.replace("?", "{}") + ">", quoteString(replPolicy)); - pst.executeUpdate(); - } - } - - @Override - @RetrySemantics.Idempotent - public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException { - long txnid = rqst.getTxnid(); - TxnErrorMsg txnErrorMsg = TxnErrorMsg.NONE; - long sourceTxnId = -1; - boolean isReplayedReplTxn = TxnType.REPL_CREATED.equals(rqst.getTxn_type()); - boolean isHiveReplTxn = rqst.isSetReplPolicy() && TxnType.DEFAULT.equals(rqst.getTxn_type()); - try { - Connection dbConn = null; - Statement stmt = null; - try { - lockInternal(); - 
dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - stmt = dbConn.createStatement(); - - if (isReplayedReplTxn) { - assert (rqst.isSetReplPolicy()); - sourceTxnId = rqst.getTxnid(); - List targetTxnIds = getTargetTxnIdList(rqst.getReplPolicy(), - Collections.singletonList(sourceTxnId), dbConn); - if (targetTxnIds.isEmpty()) { - // Idempotent case where txn was already closed or abort txn event received without - // corresponding open txn event. - LOG.info("Target txn id is missing for source txn id : {} and repl policy {}", sourceTxnId, - rqst.getReplPolicy()); - return; - } - assert targetTxnIds.size() == 1; - txnid = targetTxnIds.get(0); - } - - TxnType txnType = getOpenTxnTypeAndLock(stmt, txnid); - if (txnType == null) { - TxnStatus status = findTxnState(txnid, stmt); - if (status == TxnStatus.ABORTED) { - if (isReplayedReplTxn) { - // in case of replication, idempotent is taken care by getTargetTxnId - LOG.warn("Invalid state ABORTED for transactions started using replication replay task"); - deleteReplTxnMapEntry(dbConn, sourceTxnId, rqst.getReplPolicy()); - } - LOG.info("abortTxn({}) requested by it is already {}", JavaUtils.txnIdToString(txnid), TxnStatus.ABORTED); - return; - } - raiseTxnUnexpectedState(status, txnid); - } - - if (isReplayedReplTxn) { - txnErrorMsg = TxnErrorMsg.ABORT_REPLAYED_REPL_TXN; - } else if (isHiveReplTxn) { - txnErrorMsg = TxnErrorMsg.ABORT_DEFAULT_REPL_TXN; - } else if (rqst.isSetErrorCode()) { - txnErrorMsg = TxnErrorMsg.getTxnErrorMsg(rqst.getErrorCode()); - } - - abortTxns(dbConn, Collections.singletonList(txnid), true, isReplayedReplTxn, txnErrorMsg); - - if (isReplayedReplTxn) { - deleteReplTxnMapEntry(dbConn, sourceTxnId, rqst.getReplPolicy()); - } - - if (transactionalListeners != null && !isHiveReplTxn) { - List dbsUpdated = getTxnDbsUpdated(txnid, dbConn); - MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners, - EventMessage.EventType.ABORT_TXN, - new AbortTxnEvent(txnid, txnType, null, dbsUpdated), dbConn, sqlGenerator); - } - - LOG.debug("Going to commit"); - dbConn.commit(); - } catch (SQLException e) { - LOG.debug("Going to rollback: ", e); - rollbackDBConn(dbConn); - checkRetryable(e, "abortTxn(" + rqst + ")"); - throw new MetaException("Unable to update transaction database " - + StringUtils.stringifyException(e)); - } finally { - close(null, stmt, dbConn); - unlockInternal(); - } - } catch (RetryException e) { - abortTxn(rqst); - } - } - - @Override - @RetrySemantics.Idempotent - public void abortTxns(AbortTxnsRequest rqst) throws MetaException { - List txnIds = rqst.getTxn_ids(); - TxnErrorMsg txnErrorMsg = TxnErrorMsg.NONE; - if (rqst.isSetErrorCode()) { - txnErrorMsg = TxnErrorMsg.getTxnErrorMsg(rqst.getErrorCode()); - } - try { - Connection dbConn = null; - Statement stmt = null; - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - stmt = dbConn.createStatement(); - - List queries = new ArrayList<>(); - StringBuilder prefix = - new StringBuilder("SELECT \"TXN_ID\", \"TXN_TYPE\" from \"TXNS\" where \"TXN_STATE\" = ") - .append(TxnStatus.OPEN) - .append(" and \"TXN_TYPE\" != ").append(TxnType.READ_ONLY.getValue()).append(" and "); - - TxnUtils.buildQueryWithINClause(conf, queries, prefix, new StringBuilder(), - txnIds, "\"TXN_ID\"", false, false); - - Map nonReadOnlyTxns = new HashMap<>(); - for (String query : queries) { - LOG.debug("Going to execute query <{}>", query); - try (ResultSet rs = stmt.executeQuery(sqlGenerator.addForUpdateClause(query))) { - while (rs.next()) { - TxnType 
txnType = TxnType.findByValue(rs.getInt(2)); - nonReadOnlyTxns.put(rs.getLong(1), txnType); - } - } - } - int numAborted = abortTxns(dbConn, txnIds, false, false, txnErrorMsg); - if (numAborted != txnIds.size()) { - LOG.warn( - "Abort Transactions command only aborted {} out of {} transactions. It's possible that the other" - + " {} transactions have been aborted or committed, or the transaction ids are invalid.", - numAborted, txnIds.size(), (txnIds.size() - numAborted)); - } - - if (transactionalListeners != null){ - for (Long txnId : txnIds) { - List dbsUpdated = getTxnDbsUpdated(txnId, dbConn); - MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners, - EventMessage.EventType.ABORT_TXN, new AbortTxnEvent(txnId, - nonReadOnlyTxns.getOrDefault(txnId, TxnType.READ_ONLY), null, dbsUpdated), dbConn, sqlGenerator); - } - } - LOG.debug("Going to commit"); - dbConn.commit(); - } catch (SQLException e) { - LOG.debug("Going to rollback: ", e); - rollbackDBConn(dbConn); - checkRetryable(e, "abortTxns(" + rqst + ")"); - throw new MetaException("Unable to update transaction database " - + StringUtils.stringifyException(e)); - } finally { - closeStmt(stmt); - closeDbConn(dbConn); - } - } catch (RetryException e) { - abortTxns(rqst); - } - } - - private long getDatabaseId(Connection dbConn, String database, String catalog) throws SQLException, MetaException { - ResultSet rs = null; - PreparedStatement pst = null; - try { - String query = "select \"DB_ID\" from \"DBS\" where \"NAME\" = ? and \"CTLG_NAME\" = ?"; - pst = sqlGenerator.prepareStmtWithParameters(dbConn, query, Arrays.asList(database, catalog)); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute query <" + query.replace("?", "{}") + ">", - quoteString(database), quoteString(catalog)); - } - rs = pst.executeQuery(); - if (!rs.next()) { - throw new MetaException("DB with name " + database + " does not exist in catalog " + catalog); - } - return rs.getLong(1); - } finally { - close(rs); - closeStmt(pst); - } - } - - private void updateDatabaseProp(Connection dbConn, String database, - long dbId, String prop, String propValue) throws SQLException { - ResultSet rs = null; - PreparedStatement pst = null; - try { - String query = "SELECT \"PARAM_VALUE\" FROM \"DATABASE_PARAMS\" WHERE \"PARAM_KEY\" = " + - "'" + prop + "' AND \"DB_ID\" = " + dbId; - pst = sqlGenerator.prepareStmtWithParameters(dbConn, query, null); - rs = pst.executeQuery(); - query = null; - if (!rs.next()) { - query = "INSERT INTO \"DATABASE_PARAMS\" VALUES ( " + dbId + " , '" + prop + "' , ? )"; - } else if (!rs.getString(1).equals(propValue)) { - query = "UPDATE \"DATABASE_PARAMS\" SET \"PARAM_VALUE\" = ? 
WHERE \"DB_ID\" = " + dbId + - " AND \"PARAM_KEY\" = '" + prop + "'"; - } - closeStmt(pst); - if (query == null) { - LOG.info("Database property: {} with value: {} already updated for db: {}", prop, propValue, database); - return; - } - pst = sqlGenerator.prepareStmtWithParameters(dbConn, query, Arrays.asList(propValue)); - if (LOG.isDebugEnabled()) { - LOG.debug("Updating " + prop + " for db: " + database + " <" + query.replace("?", "{}") + ">", propValue); - } - if (pst.executeUpdate() != 1) { - //only one row insert or update should happen - throw new RuntimeException("DATABASE_PARAMS is corrupted for database: " + database); - } - } finally { - close(rs); - closeStmt(pst); - } - } - - private void markDbAsReplIncompatible(Connection dbConn, String database) throws SQLException, MetaException { - Statement stmt = null; - try { - stmt = dbConn.createStatement(); - String catalog = MetaStoreUtils.getDefaultCatalog(conf); - String s = sqlGenerator.getDbProduct().getPrepareTxnStmt(); - if (s != null) { - stmt.execute(s); - } - long dbId = getDatabaseId(dbConn, database, catalog); - updateDatabaseProp(dbConn, database, dbId, ReplConst.REPL_INCOMPATIBLE, ReplConst.TRUE); - } finally { - closeStmt(stmt); - } - } - - private void updateReplId(Connection dbConn, ReplLastIdInfo replLastIdInfo) throws SQLException, MetaException { - PreparedStatement pst = null; - PreparedStatement pstInt = null; - ResultSet rs = null; - ResultSet prs = null; - Statement stmt = null; - String query; - List params; - String lastReplId = Long.toString(replLastIdInfo.getLastReplId()); - String catalog = replLastIdInfo.isSetCatalog() ? normalizeIdentifier(replLastIdInfo.getCatalog()) : - MetaStoreUtils.getDefaultCatalog(conf); - String db = normalizeIdentifier(replLastIdInfo.getDatabase()); - String table = replLastIdInfo.isSetTable() ? normalizeIdentifier(replLastIdInfo.getTable()) : null; - List partList = replLastIdInfo.isSetPartitionList() ? replLastIdInfo.getPartitionList() : null; - - try { - stmt = dbConn.createStatement(); - - String s = sqlGenerator.getDbProduct().getPrepareTxnStmt(); - if (s != null) { - stmt.execute(s); - } - - long dbId = getDatabaseId(dbConn, db, catalog); - - // not used select for update as it will be updated by single thread only from repl load - updateDatabaseProp(dbConn, db, dbId, ReplConst.REPL_TARGET_TABLE_PROPERTY, lastReplId); - - if (table == null) { - // if only database last repl id to be updated. - return; - } - - query = "SELECT \"TBL_ID\" FROM \"TBLS\" WHERE \"TBL_NAME\" = ? AND \"DB_ID\" = " + dbId; - params = Arrays.asList(table); - pst = sqlGenerator.prepareStmtWithParameters(dbConn, query, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute query <" + query.replace("?", "{}") + ">", quoteString(table)); - } - - rs = pst.executeQuery(); - if (!rs.next()) { - throw new MetaException("Table with name " + table + " does not exist in db " + catalog + "." + db); - } - long tblId = rs.getLong(1); - rs.close(); - pst.close(); - - // select for update is not required as only one task will update this during repl load. - rs = stmt.executeQuery("SELECT \"PARAM_VALUE\" FROM \"TABLE_PARAMS\" WHERE \"PARAM_KEY\" = " + - "'repl.last.id' AND \"TBL_ID\" = " + tblId); - if (!rs.next()) { - query = "INSERT INTO \"TABLE_PARAMS\" VALUES ( " + tblId + " , 'repl.last.id' , ? )"; - } else { - query = "UPDATE \"TABLE_PARAMS\" SET \"PARAM_VALUE\" = ? 
WHERE \"TBL_ID\" = " + tblId + - " AND \"PARAM_KEY\" = 'repl.last.id'"; - } - rs.close(); - - params = Arrays.asList(lastReplId); - pst = sqlGenerator.prepareStmtWithParameters(dbConn, query, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Updating repl id for table <" + query.replace("?", "{}") + ">", lastReplId); - } - if (pst.executeUpdate() != 1) { - //only one row insert or update should happen - throw new RuntimeException("TABLE_PARAMS is corrupted for table " + table); - } - pst.close(); - - if (partList == null || partList.isEmpty()) { - return; - } - - List questions = new ArrayList<>(); - for(int i = 0; i < partList.size(); ++i) { - questions.add("?"); - } - - List queries = new ArrayList<>(); - StringBuilder prefix = new StringBuilder(); - StringBuilder suffix = new StringBuilder(); - prefix.append("SELECT \"PART_ID\" FROM \"PARTITIONS\" WHERE \"TBL_ID\" = " + tblId + " and "); - - // Populate the complete query with provided prefix and suffix - List counts = TxnUtils.buildQueryWithINClauseStrings(conf, queries, prefix, suffix, - questions, "\"PART_NAME\"", true, false); - int totalCount = 0; - assert queries.size() == counts.size(); - params = Arrays.asList(lastReplId); - for (int i = 0; i < queries.size(); i++) { - query = queries.get(i); - int partCount = counts.get(i); - - LOG.debug("Going to execute query {} with partitions {}", query, - partList.subList(totalCount, (totalCount + partCount))); - pst = dbConn.prepareStatement(query); - for (int j = 0; j < partCount; j++) { - pst.setString(j + 1, partList.get(totalCount + j)); - } - totalCount += partCount; - prs = pst.executeQuery(); - while (prs.next()) { - long partId = prs.getLong(1); - rs = stmt.executeQuery("SELECT \"PARAM_VALUE\" FROM \"PARTITION_PARAMS\" WHERE \"PARAM_KEY\" " + - " = 'repl.last.id' AND \"PART_ID\" = " + partId); - if (!rs.next()) { - query = "INSERT INTO \"PARTITION_PARAMS\" VALUES ( " + partId + " , 'repl.last.id' , ? )"; - } else { - query = "UPDATE \"PARTITION_PARAMS\" SET \"PARAM_VALUE\" = ? " + - " WHERE \"PART_ID\" = " + partId + " AND \"PARAM_KEY\" = 'repl.last.id'"; - } - rs.close(); - - pstInt = sqlGenerator.prepareStmtWithParameters(dbConn, query, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Updating repl id for part <" + query.replace("?", "{}") + ">", lastReplId); - } - if (pstInt.executeUpdate() != 1) { - //only one row insert or update should happen - throw new RuntimeException("PARTITION_PARAMS is corrupted for partition " + partId); - } - partCount--; - pstInt.close(); - } - if (partCount != 0) { - throw new MetaException(partCount + " Number of partition among " + partList + " does not exist in table " + - catalog + "." + db + "." + table); - } - prs.close(); - pst.close(); - } - } finally { - closeStmt(stmt); - close(rs); - close(prs); - closeStmt(pst); - closeStmt(pstInt); + } catch (SQLException e) { + throw new UncategorizedSQLException(null, null, e); } } /** * Concurrency/isolation notes: - * This is mutexed with {@link #openTxns(OpenTxnRequest)} and other {@link #commitTxn(CommitTxnRequest)} + * This is mutexed with {@link #openTxns(OpenTxnRequest)} and other commitTxn(CommitTxnRequest) * operations using select4update on NEXT_TXN_ID. Also, mutexes on TXNS table for specific txnid:X * see more notes below. * In order to prevent lost updates, we need to determine if any 2 transactions overlap. Each txn * is viewed as an interval [M,N]. 
M is the txnid and N is taken from the same NEXT_TXN_ID sequence
 * so that we can compare commit time of txn T with start time of txn S. This sequence can be thought of
- * as a logical time counter. If S.commitTime < T.startTime, T and S do NOT overlap.
- *
+ * as a logical time counter. If S.commitTime < T.startTime, T and S do NOT overlap.
+ *
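 * A quick numeric illustration (made-up values): S=[3,6] and T=[7,10] do NOT overlap because
 * 6 < 7, so no conflict check between them is needed; S=[3,8] and T=[7,10] DO overlap because
 * 8 >= 7, so their write sets must be compared. As a one-line check (field names illustrative):
 *
 *   boolean overlaps = committed.commitId >= current.txnId;
 *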

* Motivating example: * Suppose we have multi-statement transactions T and S both of which are attempting x = x + 1 * In order to prevent lost update problem, then the non-overlapping txns must lock in the snapshot @@ -1442,248 +579,8 @@ private void updateReplId(Connection dbConn, ReplLastIdInfo replLastIdInfo) thro * 'x' would be updated to the same value by both, i.e. lost update. */ @Override - @RetrySemantics.Idempotent("No-op if already committed") public void commitTxn(CommitTxnRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException { - char isUpdateDelete = 'N'; - long txnid = rqst.getTxnid(); - long sourceTxnId = -1; - - boolean isReplayedReplTxn = TxnType.REPL_CREATED.equals(rqst.getTxn_type()); - boolean isHiveReplTxn = rqst.isSetReplPolicy() && TxnType.DEFAULT.equals(rqst.getTxn_type()); - //start a new transaction - jdbcResource.bindDataSource(POOL_TX); - try (TransactionContext context = jdbcResource.getTransactionManager().getTransaction(PROPAGATION_REQUIRED)) { - Connection dbConn = null; - Statement stmt = null; - Long commitId = null; - try { - lockInternal(); - //make sure we are using the connection bound to the transaction, so obtain it via DataSourceUtils.getConnection() - dbConn = jdbcResource.getConnection(); - stmt = dbConn.createStatement(); - - if (rqst.isSetReplLastIdInfo()) { - updateReplId(dbConn, rqst.getReplLastIdInfo()); - } - - if (isReplayedReplTxn) { - assert (rqst.isSetReplPolicy()); - sourceTxnId = rqst.getTxnid(); - List targetTxnIds = getTargetTxnIdList(rqst.getReplPolicy(), - Collections.singletonList(sourceTxnId), dbConn); - if (targetTxnIds.isEmpty()) { - // Idempotent case where txn was already closed or commit txn event received without - // corresponding open txn event. - LOG.info("Target txn id is missing for source txn id : {} and repl policy {}", sourceTxnId, - rqst.getReplPolicy()); - return; - } - assert targetTxnIds.size() == 1; - txnid = targetTxnIds.get(0); - } - - /** - * Runs at READ_COMMITTED with S4U on TXNS row for "txnid". S4U ensures that no other - * operation can change this txn (such acquiring locks). While lock() and commitTxn() - * should not normally run concurrently (for same txn) but could due to bugs in the client - * which could then corrupt internal transaction manager state. Also competes with abortTxn(). 
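 * In sketch form (subject to the dialect-specific FOR UPDATE syntax added by the SQL generator),
 * the row-level mutex is obtained with something like:
 *   SELECT "TXN_TYPE" FROM "TXNS" WHERE "TXN_ID" = <txnid> AND "TXN_STATE" = 'o' FOR UPDATE
 * so a concurrent lock(), commitTxn() or abortTxn() on the same txnid blocks until this
 * transaction completes.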
- */ - TxnType txnType = getOpenTxnTypeAndLock(stmt, txnid); - if (txnType == null) { - //if here, txn was not found (in expected state) - TxnStatus actualTxnStatus = findTxnState(txnid, stmt); - if (actualTxnStatus == TxnStatus.COMMITTED) { - if (isReplayedReplTxn) { - // in case of replication, idempotent is taken care by getTargetTxnId - LOG.warn("Invalid state COMMITTED for transactions started using replication replay task"); - } - /** - * This makes the operation idempotent - * (assume that this is most likely due to retry logic) - */ - LOG.info("Nth commitTxn({}) msg", JavaUtils.txnIdToString(txnid)); - return; - } - raiseTxnUnexpectedState(actualTxnStatus, txnid); - } - - String conflictSQLSuffix = "FROM \"TXN_COMPONENTS\" WHERE \"TC_TXNID\"=" + txnid + " AND \"TC_OPERATION_TYPE\" IN (" + - OperationType.UPDATE + "," + OperationType.DELETE + ")"; - long tempCommitId = generateTemporaryId(); - - if (txnType == TxnType.SOFT_DELETE || txnType == TxnType.COMPACTION) { - acquireTxnLock(stmt, false); - commitId = getHighWaterMark(stmt); - - } else if (txnType != TxnType.READ_ONLY && !isReplayedReplTxn) { - String writeSetInsertSql = "INSERT INTO \"WRITE_SET\" (\"WS_DATABASE\", \"WS_TABLE\", \"WS_PARTITION\"," + - " \"WS_TXNID\", \"WS_COMMIT_ID\", \"WS_OPERATION_TYPE\")" + - " SELECT DISTINCT \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", \"TC_TXNID\", " + tempCommitId + ", \"TC_OPERATION_TYPE\" "; - - if (isUpdateOrDelete(stmt, conflictSQLSuffix)) { - isUpdateDelete = 'Y'; - //if here it means currently committing txn performed update/delete and we should check WW conflict - /** - * "select distinct" is used below because - * 1. once we get to multi-statement txns, we only care to record that something was updated once - * 2. if {@link #addDynamicPartitions(AddDynamicPartitions)} is retried by caller it may create - * duplicate entries in TXN_COMPONENTS - * but we want to add a PK on WRITE_SET which won't have unique rows w/o this distinct - * even if it includes all of its columns - * - * First insert into write_set using a temporary commitID, which will be updated in a separate call, - * see: {@link #updateWSCommitIdAndCleanUpMetadata(Statement, long, TxnType, Long, long)}}. - * This should decrease the scope of the S4U lock on the next_txn_id table. - */ - Object undoWriteSetForCurrentTxn = context.getTransactionStatus().createSavepoint(); - stmt.executeUpdate(writeSetInsertSql + (useMinHistoryLevel ? conflictSQLSuffix : - "FROM \"TXN_COMPONENTS\" WHERE \"TC_TXNID\"=" + txnid + " AND \"TC_OPERATION_TYPE\" <> " + OperationType.COMPACT)); - - /** - * This S4U will mutex with other commitTxn() and openTxns(). - * -1 below makes txn intervals look like [3,3] [4,4] if all txns are serial - * Note: it's possible to have several txns have the same commit id. Suppose 3 txns start - * at the same time and no new txns start until all 3 commit. - * We could've incremented the sequence for commitId as well but it doesn't add anything functionally. - */ - acquireTxnLock(stmt, false); - commitId = getHighWaterMark(stmt); - - if (!rqst.isExclWriteEnabled()) { - /** - * see if there are any overlapping txns that wrote the same element, i.e. 
have a conflict - * Since entire commit operation is mutexed wrt other start/commit ops, - * committed.ws_commit_id <= current.ws_commit_id for all txns - * thus if committed.ws_commit_id < current.ws_txnid, transactions do NOT overlap - * For example, [17,20] is committed, [6,80] is being committed right now - these overlap - * [17,20] committed and [21,21] committing now - these do not overlap. - * [17,18] committed and [18,19] committing now - these overlap (here 18 started while 17 was still running) - */ - try (ResultSet rs = checkForWriteConflict(stmt, txnid)) { - if (rs.next()) { - //found a conflict, so let's abort the txn - String committedTxn = "[" + JavaUtils.txnIdToString(rs.getLong(1)) + "," + rs.getLong(2) + "]"; - StringBuilder resource = new StringBuilder(rs.getString(3)).append("/").append(rs.getString(4)); - String partitionName = rs.getString(5); - if (partitionName != null) { - resource.append('/').append(partitionName); - } - String msg = "Aborting [" + JavaUtils.txnIdToString(txnid) + "," + commitId + "]" + " due to a write conflict on " + resource + - " committed by " + committedTxn + " " + rs.getString(7) + "/" + rs.getString(8); - //remove WRITE_SET info for current txn since it's about to abort - context.getTransactionStatus().rollbackToSavepoint(undoWriteSetForCurrentTxn); - LOG.info(msg); - //todo: should make abortTxns() write something into TXNS.TXN_META_INFO about this - if (abortTxns(dbConn, Collections.singletonList(txnid), false, isReplayedReplTxn, - TxnErrorMsg.ABORT_WRITE_CONFLICT) != 1) { - throw new IllegalStateException(msg + " FAILED!"); - } - jdbcResource.getTransactionManager().commit(context); - throw new TxnAbortedException(msg); - } - } - } - } else if (!useMinHistoryLevel) { - stmt.executeUpdate(writeSetInsertSql + "FROM \"TXN_COMPONENTS\" WHERE \"TC_TXNID\"=" + txnid + - " AND \"TC_OPERATION_TYPE\" <> " + OperationType.COMPACT); - commitId = getHighWaterMark(stmt); - } - } else { - /* - * current txn didn't update/delete anything (may have inserted), so just proceed with commit - * - * We only care about commit id for write txns, so for RO (when supported) txns we don't - * have to mutex on NEXT_TXN_ID. - * Consider: if RO txn is after a W txn, then RO's openTxns() will be mutexed with W's - * commitTxn() because both do S4U on NEXT_TXN_ID and thus RO will see result of W txn. - * If RO < W, then there is no reads-from relationship. - * In replication flow we don't expect any write write conflict as it should have been handled at source. - */ - assert true; - } - - - if (txnType != TxnType.READ_ONLY && !isReplayedReplTxn && !MetaStoreServerUtils.isCompactionTxn(txnType)) { - moveTxnComponentsToCompleted(stmt, txnid, isUpdateDelete); - } else if (isReplayedReplTxn) { - if (rqst.isSetWriteEventInfos()) { - String sql = String.format(COMPL_TXN_COMPONENTS_INSERT_QUERY, txnid, quoteChar(isUpdateDelete)); - try (PreparedStatement pstmt = dbConn.prepareStatement(sql)) { - int insertCounter = 0; - for (WriteEventInfo writeEventInfo : rqst.getWriteEventInfos()) { - pstmt.setString(1, writeEventInfo.getDatabase()); - pstmt.setString(2, writeEventInfo.getTable()); - pstmt.setString(3, writeEventInfo.getPartition()); - pstmt.setLong(4, writeEventInfo.getWriteId()); - - pstmt.addBatch(); - insertCounter++; - if (insertCounter % maxBatchSize == 0) { - LOG.debug("Executing a batch of <{}> queries. Batch size: {}", sql, maxBatchSize); - pstmt.executeBatch(); - } - } - if (insertCounter % maxBatchSize != 0) { - LOG.debug("Executing a batch of <{}> queries. 
Batch size: {}", sql, insertCounter % maxBatchSize); - pstmt.executeBatch(); - } - } - } - deleteReplTxnMapEntry(dbConn, sourceTxnId, rqst.getReplPolicy()); - } - updateWSCommitIdAndCleanUpMetadata(stmt, txnid, txnType, commitId, tempCommitId); - removeTxnsFromMinHistoryLevel(dbConn, ImmutableList.of(txnid)); - removeWriteIdsFromMinHistory(dbConn, ImmutableList.of(txnid)); - if (rqst.isSetKeyValue()) { - updateKeyValueAssociatedWithTxn(rqst, stmt); - } - - if (!isHiveReplTxn) { - createCommitNotificationEvent(dbConn, txnid , txnType); - } - - LOG.debug("Going to commit"); - jdbcResource.getTransactionManager().commit(context); - - if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.METASTORE_ACIDMETRICS_EXT_ON)) { - Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_COMMITTED_TXNS).inc(); - } - } catch (SQLException e) { - LOG.debug("Going to rollback: ", e); - jdbcResource.getTransactionManager().rollback(context); - checkRetryable(e, "commitTxn(" + rqst + ")"); - throw new MetaException("Unable to update transaction database " - + StringUtils.stringifyException(e)); - } finally { - closeStmt(stmt); - unlockInternal(); - } - } catch (RetryException e) { - commitTxn(rqst); - } finally { - jdbcResource.unbindDataSource(); - } - } - - /** - * Create Notifiaction Events on txn commit - * @param txnid committed txn - * @param txnType transaction type - * @throws MetaException ex - */ - protected void createCommitNotificationEvent(Connection conn, long txnid, TxnType txnType) - throws MetaException, SQLException { - if (transactionalListeners != null) { - MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners, - EventMessage.EventType.COMMIT_TXN, new CommitTxnEvent(txnid, txnType), conn, sqlGenerator); - } - } - - private boolean isUpdateOrDelete(Statement stmt, String conflictSQLSuffix) throws SQLException, MetaException { - try (ResultSet rs = stmt.executeQuery(sqlGenerator.addLimitClause(1, - "\"TC_OPERATION_TYPE\" " + conflictSQLSuffix))) { - return rs.next(); - } + new CommitTxnFunction(rqst, transactionalListeners).execute(jdbcResource); } /** @@ -1694,4085 +591,364 @@ private boolean isUpdateOrDelete(Statement stmt, String conflictSQLSuffix) throw * @return max Id for the conflicting transaction, if any, otherwise -1 * @throws MetaException */ - @RetrySemantics.ReadOnly + @Override public long getLatestTxnIdInConflict(long txnid) throws MetaException { - try { - try (Connection dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - Statement stmt = dbConn.createStatement()) { - - String writeConflictQuery = "SELECT MAX(\"COMMITTED\".\"WS_TXNID\")" + - " FROM \"WRITE_SET\" \"COMMITTED\"" + - " INNER JOIN (" + - " SELECT DISTINCT \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", \"TC_TXNID\"" + - " FROM \"TXN_COMPONENTS\"" + - " WHERE \"TC_TXNID\" = " + txnid + - " AND \"TC_OPERATION_TYPE\" IN (" + OperationType.UPDATE + "," + OperationType.DELETE + ")" + - " ) \"CUR\"" + - " ON \"COMMITTED\".\"WS_DATABASE\" = \"CUR\".\"TC_DATABASE\"" + - " AND \"COMMITTED\".\"WS_TABLE\" = \"CUR\".\"TC_TABLE\"" + - (useMinHistoryLevel ? 
"" : - " AND \"COMMITTED\".\"WS_OPERATION_TYPE\" != " + OperationType.INSERT) + - // For partitioned table we always track writes at partition level (never at table) - // and for non partitioned - always at table level, thus the same table should never - // have entries with partition key and w/o - " AND (\"COMMITTED\".\"WS_PARTITION\" = \"CUR\".\"TC_PARTITION\" OR" + - " \"CUR\".\"TC_PARTITION\" IS NULL) " + - // txns overlap - " WHERE \"CUR\".\"TC_TXNID\" <= \"COMMITTED\".\"WS_COMMIT_ID\""; - - LOG.debug("Going to execute query: <{}>", writeConflictQuery); - try (ResultSet rs = stmt.executeQuery(writeConflictQuery)) { - return rs.next() ? rs.getLong(1) : -1; - } - } catch (SQLException e) { - checkRetryable(e, "getLatestTxnIdInConflict"); - throw new MetaException(StringUtils.stringifyException(e)); - } - } catch (RetryException e) { - return getLatestTxnIdInConflict(txnid); - } + return jdbcResource.execute(new LatestTxnIdInConflictHandler(txnid)); } /** - * Returns the databases updated by txnId. - * Queries TXN_TO_WRITE_ID using txnId. - * - * @param txnId + * Replicate Table Write Ids state to mark aborted write ids and writeid high watermark. + * @param rqst info on table/partitions and writeid snapshot to replicate. * @throws MetaException */ - private List getTxnDbsUpdated(long txnId, Connection dbConn) throws MetaException { - try { - try (Statement stmt = dbConn.createStatement()) { + @Override + public void replTableWriteIdState(ReplTblWriteIdStateRequest rqst) throws MetaException { + new ReplTableWriteIdStateFunction(rqst, mutexAPI, transactionalListeners).execute(jdbcResource); + } - String query = "SELECT DISTINCT \"T2W_DATABASE\" " + - " FROM \"TXN_TO_WRITE_ID\" \"COMMITTED\"" + - " WHERE \"T2W_TXNID\" = " + txnId; + @Override + public GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst) throws MetaException { + return new GetValidWriteIdsFunction(rqst, openTxnTimeOutMillis).execute(jdbcResource); + } + + @Override + public AllocateTableWriteIdsResponse allocateTableWriteIds(AllocateTableWriteIdsRequest rqst) throws MetaException { + return new AllocateTableWriteIdsFunction(rqst, transactionalListeners).execute(jdbcResource); + } - LOG.debug("Going to execute query: <{}>", query); - try (ResultSet rs = stmt.executeQuery(query)) { - List dbsUpdated = new ArrayList(); - while (rs.next()) { - dbsUpdated.add(rs.getString(1)); - } - return dbsUpdated; - } - } catch (SQLException e) { - checkRetryable(e, "getTxnDbsUpdated"); - throw new MetaException(StringUtils.stringifyException(e)); - } - } catch (RetryException e) { - return getTxnDbsUpdated(txnId, dbConn); - } + @Override + public MaxAllocatedTableWriteIdResponse getMaxAllocatedTableWrited(MaxAllocatedTableWriteIdRequest rqst) throws MetaException { + return jdbcResource.execute(new GetMaxAllocatedTableWriteIdHandler(rqst)); } + @Override + public void seedWriteId(SeedTableWriteIdsRequest rqst) throws MetaException { + //since this is on conversion from non-acid to acid, NEXT_WRITE_ID should not have an entry + //for this table. 
It also has a unique index in case 'should not' is violated - private ResultSet checkForWriteConflict(Statement stmt, long txnid) throws SQLException, MetaException { - String writeConflictQuery = sqlGenerator.addLimitClause(1, "\"COMMITTED\".\"WS_TXNID\", \"COMMITTED\".\"WS_COMMIT_ID\", " + - "\"COMMITTED\".\"WS_DATABASE\", \"COMMITTED\".\"WS_TABLE\", \"COMMITTED\".\"WS_PARTITION\", " + - "\"CUR\".\"WS_COMMIT_ID\" \"CUR_WS_COMMIT_ID\", \"CUR\".\"WS_OPERATION_TYPE\" \"CUR_OP\", " + - "\"COMMITTED\".\"WS_OPERATION_TYPE\" \"COMMITTED_OP\" FROM \"WRITE_SET\" \"COMMITTED\" INNER JOIN \"WRITE_SET\" \"CUR\" " + - "ON \"COMMITTED\".\"WS_DATABASE\"=\"CUR\".\"WS_DATABASE\" AND \"COMMITTED\".\"WS_TABLE\"=\"CUR\".\"WS_TABLE\" " + - //For partitioned table we always track writes at partition level (never at table) - //and for non partitioned - always at table level, thus the same table should never - //have entries with partition key and w/o - "AND (\"COMMITTED\".\"WS_PARTITION\"=\"CUR\".\"WS_PARTITION\" OR (\"COMMITTED\".\"WS_PARTITION\" IS NULL AND \"CUR\".\"WS_PARTITION\" IS NULL)) " + - "WHERE \"CUR\".\"WS_TXNID\" <= \"COMMITTED\".\"WS_COMMIT_ID\"" + //txns overlap; could replace ws_txnid - // with txnid, though any decent DB should infer this - " AND \"CUR\".\"WS_TXNID\"=" + txnid + //make sure RHS of join only has rows we just inserted as - // part of this commitTxn() op - " AND \"COMMITTED\".\"WS_TXNID\" <> " + txnid + //and LHS only has committed txns - //U+U and U+D and D+D is a conflict and we don't currently track I in WRITE_SET at all - //it may seem like D+D should not be in conflict but consider 2 multi-stmt txns - //where each does "delete X + insert X, where X is a row with the same PK. This is - //equivalent to an update of X but won't be in conflict unless D+D is in conflict. - //The same happens when Hive splits U=I+D early so it looks like 2 branches of a - //multi-insert stmt (an Insert and a Delete branch). It also 'feels' - // un-serializable to allow concurrent deletes - " and (\"COMMITTED\".\"WS_OPERATION_TYPE\" IN(" + OperationType.UPDATE + - ", " + OperationType.DELETE + - ") AND \"CUR\".\"WS_OPERATION_TYPE\" IN(" + OperationType.UPDATE+ ", " - + OperationType.DELETE + "))"); - LOG.debug("Going to execute query: <{}>", writeConflictQuery); - return stmt.executeQuery(writeConflictQuery); + // First allocation of write id should add the table to the next_write_id meta table + // The initial value for write id should be 1 and hence we add 1 with number of write ids + // allocated here + jdbcResource.getJdbcTemplate().update( + "INSERT INTO \"NEXT_WRITE_ID\" (\"NWI_DATABASE\", \"NWI_TABLE\", \"NWI_NEXT\") VALUES (:db, :table, :writeId)", + new MapSqlParameterSource() + .addValue("db", rqst.getDbName()) + .addValue("table", rqst.getTableName()) + .addValue("writeId", rqst.getSeedWriteId() + 1)); } - private void moveTxnComponentsToCompleted(Statement stmt, long txnid, char isUpdateDelete) throws SQLException { - // Move the record from txn_components into completed_txn_components so that the compactor - // knows where to look to compact. 
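// For example, committing txn 42 that updated write id 7 of db1.t1 (partition p=1) would copy a row
// (CTC_TXNID=42, CTC_DATABASE='db1', CTC_TABLE='t1', CTC_PARTITION='p=1', CTC_WRITEID=7,
// CTC_UPDATE_DELETE='Y'); the compaction initiator later scans these rows to pick candidates.
// (Values above are made up for illustration.)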
- String s = "INSERT INTO \"COMPLETED_TXN_COMPONENTS\" (\"CTC_TXNID\", \"CTC_DATABASE\", " + - "\"CTC_TABLE\", \"CTC_PARTITION\", \"CTC_WRITEID\", \"CTC_UPDATE_DELETE\") SELECT \"TC_TXNID\"," + - " \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", \"TC_WRITEID\", '" + isUpdateDelete + - "' FROM \"TXN_COMPONENTS\" WHERE \"TC_TXNID\" = " + txnid + - //we only track compactor activity in TXN_COMPONENTS to handle the case where the - //compactor txn aborts - so don't bother copying it to COMPLETED_TXN_COMPONENTS - " AND \"TC_OPERATION_TYPE\" <> " + OperationType.COMPACT; - LOG.debug("Going to execute insert <{}>", s); - - if ((stmt.executeUpdate(s)) < 1) { - //this can be reasonable for an empty txn START/COMMIT or read-only txn - //also an IUD with DP that didn't match any rows. - LOG.info("Expected to move at least one record from txn_components to " - + "completed_txn_components when committing txn! {}", JavaUtils.txnIdToString(txnid)); + @Override + public void seedTxnId(SeedTxnIdRequest rqst) throws MetaException { + /* + * Locking the txnLock an exclusive way, we do not want to set the txnId backward accidentally + * if there are concurrent open transactions + */ + acquireTxnLock(false); + long highWaterMark = jdbcResource.execute(new GetHighWaterMarkHandler()); + if (highWaterMark >= rqst.getSeedTxnId()) { + throw new MetaException(MessageFormat + .format("Invalid txnId seed {}, the highWaterMark is {}", rqst.getSeedTxnId(), highWaterMark)); } + jdbcResource.getJdbcTemplate().getJdbcTemplate() + .execute((Statement stmt) -> stmt.execute(dbProduct.getTxnSeedFn(rqst.getSeedTxnId()))); } - /** - * See overridden method in CompactionTxnHandler also. - */ - protected void updateWSCommitIdAndCleanUpMetadata(Statement stmt, long txnid, TxnType txnType, - Long commitId, long tempId) throws SQLException, MetaException { - List queryBatch = new ArrayList<>(5); - // update write_set with real commitId - if (commitId != null) { - queryBatch.add("UPDATE \"WRITE_SET\" SET \"WS_COMMIT_ID\" = " + commitId + - " WHERE \"WS_COMMIT_ID\" = " + tempId + " AND \"WS_TXNID\" = " + txnid); - } - // clean up txn related metadata - if (txnType != TxnType.READ_ONLY) { - queryBatch.add("DELETE FROM \"TXN_COMPONENTS\" WHERE \"TC_TXNID\" = " + txnid); - } - queryBatch.add("DELETE FROM \"HIVE_LOCKS\" WHERE \"HL_TXNID\" = " + txnid); - // DO NOT remove the transaction from the TXN table, the cleaner will remove it when appropriate - queryBatch.add("UPDATE \"TXNS\" SET \"TXN_STATE\" = " + TxnStatus.COMMITTED + " WHERE \"TXN_ID\" = " + txnid); - if (txnType == TxnType.MATER_VIEW_REBUILD) { - queryBatch.add("DELETE FROM \"MATERIALIZATION_REBUILD_LOCKS\" WHERE \"MRL_TXN_ID\" = " + txnid); - } - // execute all in one batch - executeQueriesInBatchNoCount(dbProduct, stmt, queryBatch, maxBatchSize); + @Override + public void addWriteNotificationLog(ListenerEvent acidWriteEvent) throws MetaException { + MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners, + acidWriteEvent instanceof AcidWriteEvent ? 
EventMessage.EventType.ACID_WRITE + : EventMessage.EventType.BATCH_ACID_WRITE, + acidWriteEvent, jdbcResource.getConnection(), sqlGenerator); } - private void updateKeyValueAssociatedWithTxn(CommitTxnRequest rqst, Statement stmt) throws SQLException { - if (!rqst.getKeyValue().getKey().startsWith(TxnStore.TXN_KEY_START)) { - String errorMsg = "Error updating key/value in the sql backend with" - + " txnId=" + rqst.getTxnid() + "," - + " tableId=" + rqst.getKeyValue().getTableId() + "," - + " key=" + rqst.getKeyValue().getKey() + "," - + " value=" + rqst.getKeyValue().getValue() + "." - + " key should start with " + TXN_KEY_START + "."; - LOG.warn(errorMsg); - throw new IllegalArgumentException(errorMsg); - } - String s = "UPDATE \"TABLE_PARAMS\" SET" - + " \"PARAM_VALUE\" = " + quoteString(rqst.getKeyValue().getValue()) - + " WHERE \"TBL_ID\" = " + rqst.getKeyValue().getTableId() - + " AND \"PARAM_KEY\" = " + quoteString(rqst.getKeyValue().getKey()); - LOG.debug("Going to execute update <{}>", s); - int affectedRows = stmt.executeUpdate(s); - if (affectedRows != 1) { - String errorMsg = "Error updating key/value in the sql backend with" - + " txnId=" + rqst.getTxnid() + "," - + " tableId=" + rqst.getKeyValue().getTableId() + "," - + " key=" + rqst.getKeyValue().getKey() + "," - + " value=" + rqst.getKeyValue().getValue() + "." - + " Only one row should have been affected but " - + affectedRows + " rows where affected."; - LOG.warn(errorMsg); - throw new IllegalStateException(errorMsg); - } + @Override + public void performWriteSetGC() throws MetaException { + long commitHighWaterMark = new MinOpenTxnIdWaterMarkFunction(openTxnTimeOutMillis).execute(jdbcResource); + jdbcResource.getJdbcTemplate().update( + "DELETE FROM \"WRITE_SET\" WHERE \"WS_COMMIT_ID\" < :hwm", + new MapSqlParameterSource() + .addValue("hwm", commitHighWaterMark)); + } + + @Override + public void updateTransactionStatistics(UpdateTransactionalStatsRequest req) throws MetaException { + jdbcResource.execute( + "UPDATE \"MV_TABLES_USED\" " + + "SET \"INSERTED_COUNT\"=\"INSERTED_COUNT\"+ :insertCount" + + ",\"UPDATED_COUNT\"=\"UPDATED_COUNT\"+ :updateCount" + + ",\"DELETED_COUNT\"=\"DELETED_COUNT\"+ :deleteCount" + + " WHERE \"TBL_ID\"= :tableId", + new MapSqlParameterSource() + .addValue("insertCount", req.getInsertCount()) + .addValue("updateCount", req.getUpdatedCount()) + .addValue("deleteCount", req.getDeletedCount()) + .addValue("tableId", req.getTableId()), null); } /** - * Replicate Table Write Ids state to mark aborted write ids and writeid high water mark. - * @param rqst info on table/partitions and writeid snapshot to replicate. - * @throws MetaException + * Get invalidation info for the materialization. Materialization information + * contains information about whether there was update/delete/compaction operations on the source + * tables used by the materialization since it was created. 
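 * A usage sketch (the Materialization accessors shown are assumed from its thrift definition):
 *
 *   Materialization m = txnHandler.getMaterializationInvalidationInfo(creationMetadata, validTxnListStr);
 *   if (m.isSourceTablesUpdateDeleteModified() || m.isSourceTablesCompacted()) {
 *     // the materialized view may be stale with respect to its source tables; consider a rebuild
 *   }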
*/ @Override - @RetrySemantics.Idempotent("No-op if already replicated the writeid state") - public void replTableWriteIdState(ReplTblWriteIdStateRequest rqst) throws MetaException { - String dbName = rqst.getDbName().toLowerCase(); - String tblName = rqst.getTableName().toLowerCase(); - ValidWriteIdList validWriteIdList = new ValidReaderWriteIdList(rqst.getValidWriteIdlist()); + public Materialization getMaterializationInvalidationInfo( + CreationMetadata creationMetadata, String validTxnListStr) throws MetaException { + return new GetMaterializationInvalidationInfoFunction(creationMetadata, validTxnListStr).execute(jdbcResource); + } - // Get the abortedWriteIds which are already sorted in ascending order. - List abortedWriteIds = getAbortedWriteIds(validWriteIdList); - int numAbortedWrites = abortedWriteIds.size(); - try { - Connection dbConn = null; - Statement stmt = null; - PreparedStatement pStmt = null; - List insertPreparedStmts = null; - ResultSet rs = null; - List params = Arrays.asList(dbName, tblName); - try { - lockInternal(); - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - stmt = dbConn.createStatement(); + @Override + public LockResponse lockMaterializationRebuild(String dbName, String tableName, long txnId) throws MetaException { + return new LockMaterializationRebuildFunction(dbName, tableName, txnId, mutexAPI).execute(jdbcResource); + } - // Check if this txn state is already replicated for this given table. If yes, then it is - // idempotent case and just return. - String sql = "SELECT \"NWI_NEXT\" FROM \"NEXT_WRITE_ID\" WHERE \"NWI_DATABASE\" = ? AND \"NWI_TABLE\" = ?"; - pStmt = sqlGenerator.prepareStmtWithParameters(dbConn, sql, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute query <" + sql.replace("?", "{}") + ">", - quoteString(dbName), quoteString(tblName)); - } - rs = pStmt.executeQuery(); - if (rs.next()) { - LOG.info("Idempotent flow: WriteId state <{}> is already applied for the table: {}.{}", validWriteIdList, - dbName, tblName); - rollbackDBConn(dbConn); - return; - } + @Override + public boolean heartbeatLockMaterializationRebuild(String dbName, String tableName, long txnId) throws MetaException { + int result = jdbcResource.execute( + "UPDATE \"MATERIALIZATION_REBUILD_LOCKS\"" + + " SET \"MRL_LAST_HEARTBEAT\" = " + Instant.now().toEpochMilli() + + " WHERE \"MRL_TXN_ID\" = " + txnId + + " AND \"MRL_DB_NAME\" = ?" + + " AND \"MRL_TBL_NAME\" = ?", + new MapSqlParameterSource() + .addValue("now", Instant.now().toEpochMilli()) + .addValue("txnId", txnId) + .addValue("dbName", dbName) + .addValue("tableNane", tableName), + ParameterizedCommand.AT_LEAST_ONE_ROW); + return result >= 1; + } - if (numAbortedWrites > 0) { - // Allocate/Map one txn per aborted writeId and abort the txn to mark writeid as aborted. - // We don't use the txnLock, all of these transactions will be aborted in this one rdbm transaction - // So they will not effect the commitTxn in any way - List txnIds = openTxns(dbConn, - new OpenTxnRequest(numAbortedWrites, rqst.getUser(), rqst.getHostName())); - assert(numAbortedWrites == txnIds.size()); - - // Map each aborted write id with each allocated txn. 
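// Illustration (made-up ids): replicating aborted write ids {5, 7} of db1.t1 opens two txns, say 101
// and 102, writes TXN_TO_WRITE_ID rows (101, 'db1', 't1', 5) and (102, 'db1', 't1', 7), and then
// aborts txns 101 and 102, so readers building a snapshot see write ids 5 and 7 as aborted.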
- List rows = new ArrayList<>(); - List> paramsList = new ArrayList<>(); - int i = 0; - for (long txn : txnIds) { - long writeId = abortedWriteIds.get(i++); - rows.add(txn + ", ?, ?, " + writeId); - paramsList.add(params); - LOG.info("Allocated writeID: {} for txnId: {}", writeId, txn); - } - - // Insert entries to TXN_TO_WRITE_ID for aborted write ids - insertPreparedStmts = sqlGenerator.createInsertValuesPreparedStmt(dbConn, - "\"TXN_TO_WRITE_ID\" (\"T2W_TXNID\", \"T2W_DATABASE\", \"T2W_TABLE\", \"T2W_WRITEID\")", rows, - paramsList); - for (PreparedStatement pst : insertPreparedStmts) { - pst.execute(); - } - - // Abort all the allocated txns so that the mapped write ids are referred as aborted ones. - int numAborts = abortTxns(dbConn, txnIds, false, false, TxnErrorMsg.ABORT_REPL_WRITEID_TXN); - assert(numAborts == numAbortedWrites); - } - - // There are some txns in the list which has no write id allocated and hence go ahead and do it. - // Get the next write id for the given table and update it with new next write id. - // It is expected NEXT_WRITE_ID doesn't have entry for this table and hence directly insert it. - long nextWriteId = validWriteIdList.getHighWatermark() + 1; - - // First allocation of write id (hwm+1) should add the table to the next_write_id meta table. - sql = "INSERT INTO \"NEXT_WRITE_ID\" (\"NWI_DATABASE\", \"NWI_TABLE\", \"NWI_NEXT\") VALUES (?, ?, " - + Long.toString(nextWriteId) + ")"; - closeStmt(pStmt); - pStmt = sqlGenerator.prepareStmtWithParameters(dbConn, sql, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute insert <" + sql.replace("?", "{}") + ">", - quoteString(dbName), quoteString(tblName)); - } - pStmt.execute(); - - LOG.info("WriteId state <{}> is applied for the table: {}.{}", validWriteIdList, dbName, tblName); - LOG.debug("Going to commit"); - dbConn.commit(); - } catch (SQLException e) { - LOG.debug("Going to rollback: ", e); - rollbackDBConn(dbConn); - checkRetryable(e, "replTableWriteIdState(" + rqst + ")", true); - throw new MetaException("Unable to update transaction database " - + StringUtils.stringifyException(e)); - } finally { - if (insertPreparedStmts != null) { - for (PreparedStatement pst : insertPreparedStmts) { - closeStmt(pst); - } - } - closeStmt(pStmt); - close(rs, stmt, dbConn); - unlockInternal(); - } - } catch (RetryException e) { - replTableWriteIdState(rqst); - } - - // Schedule Major compaction on all the partitions/table to clean aborted data - if (numAbortedWrites > 0) { - CompactionRequest compactRqst = new CompactionRequest(rqst.getDbName(), rqst.getTableName(), - CompactionType.MAJOR); - if (rqst.isSetPartNames()) { - for (String partName : rqst.getPartNames()) { - compactRqst.setPartitionname(partName); - compact(compactRqst); - } - } else { - compact(compactRqst); - } - } - } - - private List getAbortedWriteIds(ValidWriteIdList validWriteIdList) { - return Arrays.stream(validWriteIdList.getInvalidWriteIds()) - .filter(validWriteIdList::isWriteIdAborted) - .boxed() - .collect(Collectors.toList()); - } - - private ValidTxnList getValidTxnList(Connection dbConn, String fullTableName, Long writeId) throws MetaException, - SQLException { - PreparedStatement pst = null; - ResultSet rs = null; - try { - String[] names = TxnUtils.getDbTableName(fullTableName); - assert names.length == 2; - List params = Arrays.asList(names[0], names[1]); - String s = - "SELECT \"T2W_TXNID\" FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_DATABASE\" = ? AND " - + "\"T2W_TABLE\" = ? 
AND \"T2W_WRITEID\" = "+ writeId; - pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute query <" + s.replace("?", "{}") + ">", quoteString(names[0]), - quoteString(names[1])); - } - rs = pst.executeQuery(); - if (rs.next()) { - long txnId = rs.getLong(1); - return TxnCommonUtils.createValidReadTxnList(getOpenTxns(dbConn), txnId); - } - throw new MetaException("invalid write id " + writeId + " for table " + fullTableName); - } finally { - close(rs, pst, null); - } - } - - @Override - @RetrySemantics.ReadOnly - public GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst) throws MetaException { - try { - Connection dbConn = null; - ValidTxnList validTxnList; - - try { - /** - * This runs at READ_COMMITTED for exactly the same reason as {@link #getOpenTxnsInfo()} - */ - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - - // We should prepare the valid write ids list based on validTxnList of current txn. - // If no txn exists in the caller, then they would pass null for validTxnList and so it is - // required to get the current state of txns to make validTxnList - if (rqst.isSetValidTxnList()) { - assert rqst.isSetWriteId() == false; - validTxnList = new ValidReadTxnList(rqst.getValidTxnList()); - } else if (rqst.isSetWriteId()) { - validTxnList = getValidTxnList(dbConn, rqst.getFullTableNames().get(0), rqst.getWriteId()); - } else { - // Passing 0 for currentTxn means, this validTxnList is not wrt to any txn - validTxnList = TxnCommonUtils.createValidReadTxnList(getOpenTxns(dbConn), 0); - } - - // Get the valid write id list for all the tables read by the current txn - List tblValidWriteIdsList = new ArrayList<>(); - for (String fullTableName : rqst.getFullTableNames()) { - tblValidWriteIdsList.add(getValidWriteIdsForTable(dbConn, fullTableName, validTxnList)); - } - - GetValidWriteIdsResponse owr = new GetValidWriteIdsResponse(tblValidWriteIdsList); - return owr; - } catch (SQLException e) { - checkRetryable(e, "getValidWriteIds"); - throw new MetaException("Unable to select from transaction database, " - + StringUtils.stringifyException(e)); - } finally { - closeDbConn(dbConn); - } - } catch (RetryException e) { - return getValidWriteIds(rqst); - } - } - - // Method to get the Valid write ids list for the given table - // Input fullTableName is expected to be of format . - private TableValidWriteIds getValidWriteIdsForTable(Connection dbConn, String fullTableName, - ValidTxnList validTxnList) throws SQLException { - PreparedStatement pst = null; - ResultSet rs = null; - String[] names = TxnUtils.getDbTableName(fullTableName); - assert(names.length == 2); - List params = Arrays.asList(names[0], names[1]); - try { - // Need to initialize to 0 to make sure if nobody modified this table, then current txn - // shouldn't read any data. - // If there is a conversion from non-acid to acid table, then by default 0 would be assigned as - // writeId for data from non-acid table and so writeIdHwm=0 would ensure those data are readable by any txns. - long writeIdHwm = 0; - List invalidWriteIdList = new ArrayList<>(); - long minOpenWriteId = Long.MAX_VALUE; - BitSet abortedBits = new BitSet(); - long txnHwm = validTxnList.getHighWatermark(); - - // Find the writeId high water mark based upon txnId high water mark. If found, then, need to - // traverse through all write Ids less than writeId HWM to make exceptions list. 
- // The writeHWM = min(NEXT_WRITE_ID.nwi_next-1, max(TXN_TO_WRITE_ID.t2w_writeid under txnHwm)) - String s = "SELECT MAX(\"T2W_WRITEID\") FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_TXNID\" <= " + txnHwm - + " AND \"T2W_DATABASE\" = ? AND \"T2W_TABLE\" = ?"; - pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute query<" + s.replace("?", "{}") + ">", - quoteString(names[0]), quoteString(names[1])); - } - rs = pst.executeQuery(); - if (rs.next()) { - writeIdHwm = rs.getLong(1); - } - - // If no writeIds allocated by txns under txnHwm, then find writeHwm from NEXT_WRITE_ID. - if (writeIdHwm <= 0) { - // Need to subtract 1 as nwi_next would be the next write id to be allocated but we need highest - // allocated write id. - s = "SELECT \"NWI_NEXT\"-1 FROM \"NEXT_WRITE_ID\" WHERE \"NWI_DATABASE\" = ? AND \"NWI_TABLE\" = ?"; - closeStmt(pst); - pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute query<" + s.replace("?", "{}") + ">", - quoteString(names[0]), quoteString(names[1])); - } - rs = pst.executeQuery(); - if (rs.next()) { - writeIdHwm = rs.getLong(1); - } - } - boolean foundValidUncompactedWrite = false; - // As writeIdHwm is known, query all writeIds under the writeId HWM. - // If any writeId under HWM is allocated by txn > txnId HWM or belongs to open/aborted txns, - // then will be added to invalid list. The results should be sorted in ascending order based - // on write id. The sorting is needed as exceptions list in ValidWriteIdList would be looked-up - // using binary search. - s = "SELECT \"T2W_TXNID\", \"T2W_WRITEID\" FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_WRITEID\" <= " + Long.toString(writeIdHwm) - + " AND \"T2W_DATABASE\" = ? AND \"T2W_TABLE\" = ? ORDER BY \"T2W_WRITEID\" ASC"; - closeStmt(pst); - pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute query<" + s.replace("?", "{}") + ">", - quoteString(names[0]), quoteString(names[1])); - } - rs = pst.executeQuery(); - while (rs.next()) { - long txnId = rs.getLong(1); - long writeId = rs.getLong(2); - if (validTxnList.isTxnValid(txnId)) { - // Skip if the transaction under evaluation is already committed. - foundValidUncompactedWrite = true; - continue; - } - // The current txn is either in open or aborted state. - // Mark the write ids state as per the txn state. - invalidWriteIdList.add(writeId); - if (validTxnList.isTxnAborted(txnId)) { - abortedBits.set(invalidWriteIdList.size() - 1); - } else { - minOpenWriteId = Math.min(minOpenWriteId, writeId); - } - } - // If we have compacted writes and some invalid writes on the table, - // return the lowest invalid write as a writeIdHwm and set it as invalid. - if (!foundValidUncompactedWrite) { - long writeId = invalidWriteIdList.isEmpty() ? 
-1 : invalidWriteIdList.get(0); - invalidWriteIdList = new ArrayList<>(); - abortedBits = new BitSet(); - - if (writeId != -1) { - invalidWriteIdList.add(writeId); - writeIdHwm = writeId; - if (writeId != minOpenWriteId) { - abortedBits.set(0); - } - } - } - ByteBuffer byteBuffer = ByteBuffer.wrap(abortedBits.toByteArray()); - TableValidWriteIds owi = new TableValidWriteIds(fullTableName, writeIdHwm, invalidWriteIdList, byteBuffer); - if (minOpenWriteId < Long.MAX_VALUE) { - owi.setMinOpenWriteId(minOpenWriteId); - } - return owi; - } finally { - closeStmt(pst); - close(rs); - } - } - - @Override - @RetrySemantics.Idempotent - public AllocateTableWriteIdsResponse allocateTableWriteIds(AllocateTableWriteIdsRequest rqst) - throws MetaException { - List txnIds; - String dbName = rqst.getDbName().toLowerCase(); - String tblName = rqst.getTableName().toLowerCase(); - boolean shouldReallocate = rqst.isReallocate(); - try { - Connection dbConn = null; - PreparedStatement pStmt = null; - ResultSet rs = null; - List txnToWriteIds = new ArrayList<>(); - List srcTxnToWriteIds = null; - try { - lockInternal(); - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - - if (rqst.isSetReplPolicy()) { - srcTxnToWriteIds = rqst.getSrcTxnToWriteIdList(); - List srcTxnIds = new ArrayList<>(); - assert (rqst.isSetSrcTxnToWriteIdList()); - assert (!rqst.isSetTxnIds()); - assert (!srcTxnToWriteIds.isEmpty()); - - for (TxnToWriteId txnToWriteId : srcTxnToWriteIds) { - srcTxnIds.add(txnToWriteId.getTxnId()); - } - txnIds = getTargetTxnIdList(rqst.getReplPolicy(), srcTxnIds, dbConn); - if (srcTxnIds.size() != txnIds.size()) { - // Idempotent case where txn was already closed but gets allocate write id event. - // So, just ignore it and return empty list. - LOG.info("Idempotent case: Target txn id is missing for source txn id : {} and repl policy {}", srcTxnIds, - rqst.getReplPolicy()); - return new AllocateTableWriteIdsResponse(txnToWriteIds); - } - } else { - assert (!rqst.isSetSrcTxnToWriteIdList()); - assert (rqst.isSetTxnIds()); - txnIds = rqst.getTxnIds(); - } - - //Easiest check since we can't differentiate do we handle singleton list or list with multiple txn ids. - if (txnIds.size() > 1) { - Collections.sort(txnIds); //easier to read logs and for assumption done in replication flow - } - - // Check if all the input txns are in valid state. - // Write IDs should be allocated only for open and not read-only transactions. - try (Statement stmt = dbConn.createStatement()) { - if (!isTxnsOpenAndNotReadOnly(txnIds, stmt)) { - String errorMsg = "Write ID allocation on " + TableName.getDbTable(dbName, tblName) - + " failed for input txns: " - + getAbortedAndReadOnlyTxns(txnIds, stmt) - + getCommittedTxns(txnIds, stmt); - LOG.error(errorMsg); - - throw new IllegalStateException("Write ID allocation failed on " + TableName.getDbTable(dbName, tblName) - + " as not all input txns in open state or read-only"); - } - } - - List queries = new ArrayList<>(); - StringBuilder prefix = new StringBuilder(); - StringBuilder suffix = new StringBuilder(); - long writeId; - int allocatedTxnsCount = 0; - List params = Arrays.asList(dbName, tblName); - if (shouldReallocate) { - // during query recompilation after lock acquistion, it is important to realloc new writeIds - // to ensure writeIds are committed in increasing order. - prefix.append("DELETE FROM \"TXN_TO_WRITE_ID\" WHERE") - .append(" \"T2W_DATABASE\" = ? AND \"T2W_TABLE\" = ? 
AND "); - TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, - txnIds, "\"T2W_TXNID\"", false, false); - for (String query : queries) { - pStmt = sqlGenerator.prepareStmtWithParameters(dbConn, query, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute delete <" + query.replace("?", "{}") + ">", - quoteString(dbName), quoteString(tblName)); - } - int numRowsDeleted = pStmt.executeUpdate(); - LOG.info("Removed {} prior writeIds during reallocation", numRowsDeleted); - closeStmt(pStmt); - } - } else { - // Traverse the TXN_TO_WRITE_ID to see if any of the input txns already have allocated a - // write id for the same db.table. If yes, then need to reuse it else have to allocate new one - // The write id would have been already allocated in case of multi-statement txns where - // first write on a table will allocate write id and rest of the writes should re-use it. - prefix.append("SELECT \"T2W_TXNID\", \"T2W_WRITEID\" FROM \"TXN_TO_WRITE_ID\" WHERE") - .append(" \"T2W_DATABASE\" = ? AND \"T2W_TABLE\" = ? AND "); - TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, - txnIds, "\"T2W_TXNID\"", false, false); - for (String query : queries) { - pStmt = sqlGenerator.prepareStmtWithParameters(dbConn, query, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute query <" + query.replace("?", "{}") + ">", - quoteString(dbName), quoteString(tblName)); - } - rs = pStmt.executeQuery(); - while (rs.next()) { - // If table write ID is already allocated for the given transaction, then just use it - long txnId = rs.getLong(1); - writeId = rs.getLong(2); - txnToWriteIds.add(new TxnToWriteId(txnId, writeId)); - allocatedTxnsCount++; - LOG.info("Reused already allocated writeID: {} for txnId: {}", writeId, txnId); - } - closeStmt(pStmt); - } - } - - // Batch allocation should always happen atomically. Either write ids for all txns is allocated or none. - long numOfWriteIds = txnIds.size(); - assert ((allocatedTxnsCount == 0) || (numOfWriteIds == allocatedTxnsCount)); - if (allocatedTxnsCount == numOfWriteIds) { - // If all the txns in the list have pre-allocated write ids for the given table, then just return. - // This is for idempotent case. - return new AllocateTableWriteIdsResponse(txnToWriteIds); - } - - long srcWriteId = 0; - if (rqst.isSetReplPolicy()) { - // In replication flow, we always need to allocate write ID equal to that of source. - assert (srcTxnToWriteIds != null); - srcWriteId = srcTxnToWriteIds.get(0).getWriteId(); - } - - - // There are some txns in the list which does not have write id allocated and hence go ahead and do it. - // Get the next write id for the given table and update it with new next write id. - // This is select for update query which takes a lock if the table entry is already there in NEXT_WRITE_ID - String s = sqlGenerator.addForUpdateClause( - "SELECT \"NWI_NEXT\" FROM \"NEXT_WRITE_ID\" WHERE \"NWI_DATABASE\" = ? AND \"NWI_TABLE\" = ?"); - closeStmt(pStmt); - pStmt = sqlGenerator.prepareStmtWithParameters(dbConn, s, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute query <" + s.replace("?", "{}") + ">", - quoteString(dbName), quoteString(tblName)); - } - rs = pStmt.executeQuery(); - if (!rs.next()) { - // First allocation of write id should add the table to the next_write_id meta table - // The initial value for write id should be 1 and hence we add 1 with number of write ids allocated here - // For repl flow, we need to force set the incoming write id. - writeId = (srcWriteId > 0) ? 
srcWriteId : 1; - s = "INSERT INTO \"NEXT_WRITE_ID\" (\"NWI_DATABASE\", \"NWI_TABLE\", \"NWI_NEXT\") VALUES (?, ?, " - + (writeId + numOfWriteIds) + ")"; - closeStmt(pStmt); - pStmt = sqlGenerator.prepareStmtWithParameters(dbConn, s, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute insert <" + s.replace("?", "{}") + ">", - quoteString(dbName), quoteString(tblName)); - } - pStmt.execute(); - } else { - long nextWriteId = rs.getLong(1); - writeId = (srcWriteId > 0) ? srcWriteId : nextWriteId; - - // Update the NEXT_WRITE_ID for the given table after incrementing by number of write ids allocated - s = "UPDATE \"NEXT_WRITE_ID\" SET \"NWI_NEXT\" = " + (writeId + numOfWriteIds) - + " WHERE \"NWI_DATABASE\" = ? AND \"NWI_TABLE\" = ?"; - closeStmt(pStmt); - pStmt = sqlGenerator.prepareStmtWithParameters(dbConn, s, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute update <" + s.replace("?", "{}") + ">", - quoteString(dbName), quoteString(tblName)); - } - pStmt.executeUpdate(); - - // For repl flow, if the source write id is mismatching with target next write id, then current - // metadata in TXN_TO_WRITE_ID is stale for this table and hence need to clean-up TXN_TO_WRITE_ID. - // This is possible in case of first incremental repl after bootstrap where concurrent write - // and drop table was performed at source during bootstrap dump. - if ((srcWriteId > 0) && (srcWriteId != nextWriteId)) { - s = "DELETE FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_DATABASE\" = ? AND \"T2W_TABLE\" = ?"; - closeStmt(pStmt); - pStmt = sqlGenerator.prepareStmtWithParameters(dbConn, s, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute delete <" + s.replace("?", "{}") + ">", - quoteString(dbName), quoteString(tblName)); - } - pStmt.executeUpdate(); - } - } - - // Map the newly allocated write ids against the list of txns which doesn't have pre-allocated write ids - try (PreparedStatement pstmt = dbConn.prepareStatement(TXN_TO_WRITE_ID_INSERT_QUERY)) { - for (long txnId : txnIds) { - pstmt.setLong(1, txnId); - pstmt.setString(2, dbName); - pstmt.setString(3, tblName); - pstmt.setLong(4, writeId); - pstmt.addBatch(); - - txnToWriteIds.add(new TxnToWriteId(txnId, writeId)); - LOG.info("Allocated writeId: {} for txnId: {}", writeId, txnId); - writeId++; - if (txnToWriteIds.size() % maxBatchSize == 0) { - LOG.debug("Executing a batch of <{}> queries. Batch size: {}", TXN_TO_WRITE_ID_INSERT_QUERY, - maxBatchSize); - pstmt.executeBatch(); - } - } - if (txnToWriteIds.size() % maxBatchSize != 0) { - LOG.debug("Executing a batch of <{}> queries. Batch size: {}", TXN_TO_WRITE_ID_INSERT_QUERY, - txnToWriteIds.size() % maxBatchSize); - pstmt.executeBatch(); - } - } - - if (transactionalListeners != null) { - MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners, - EventMessage.EventType.ALLOC_WRITE_ID, - new AllocWriteIdEvent(txnToWriteIds, dbName, tblName), - dbConn, sqlGenerator); - } - - LOG.info("Allocated write ids for dbName={}, tblName={} (txnIds: {})", dbName, tblName, rqst.getTxnIds()); - dbConn.commit(); - return new AllocateTableWriteIdsResponse(txnToWriteIds); - } catch (SQLException e) { - LOG.error("Exception during write ids allocation for request={}. 
Will retry if possible.", rqst, e); - rollbackDBConn(dbConn); - checkRetryable(e, "allocateTableWriteIds(" + rqst + ")", true); - throw new MetaException("Unable to update transaction database " - + StringUtils.stringifyException(e)); - } finally { - close(rs, pStmt, dbConn); - unlockInternal(); - } - } catch (RetryException e) { - return allocateTableWriteIds(rqst); - } - } - - @Override - public MaxAllocatedTableWriteIdResponse getMaxAllocatedTableWrited(MaxAllocatedTableWriteIdRequest rqst) throws MetaException { - String dbName = rqst.getDbName(); - String tableName = rqst.getTableName(); - try { - Connection dbConn = null; - PreparedStatement pStmt = null; - ResultSet rs = null; - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - pStmt = sqlGenerator.prepareStmtWithParameters(dbConn, SELECT_NWI_NEXT_FROM_NEXT_WRITE_ID, - Arrays.asList(dbName, tableName)); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute query <" + SELECT_NWI_NEXT_FROM_NEXT_WRITE_ID.replace("?", "{}") + ">", - quoteString(dbName), quoteString(tableName)); - } - rs = pStmt.executeQuery(); - // If there is no record, we never allocated anything - long maxWriteId = 0l; - if (rs.next()) { - // The row contains the nextId not the previously allocated - maxWriteId = rs.getLong(1) - 1; - } - return new MaxAllocatedTableWriteIdResponse(maxWriteId); - } catch (SQLException e) { - LOG.error( - "Exception during reading the max allocated writeId for dbName={}, tableName={}. Will retry if possible.", - dbName, tableName, e); - checkRetryable(e, "getMaxAllocatedTableWrited(" + rqst + ")"); - throw new MetaException("Unable to update transaction database " + StringUtils.stringifyException(e)); - } finally { - close(rs, pStmt, dbConn); - } - } catch (RetryException e) { - return getMaxAllocatedTableWrited(rqst); - } - } - - @Override - public void seedWriteId(SeedTableWriteIdsRequest rqst) - throws MetaException { - try { - Connection dbConn = null; - PreparedStatement pst = null; - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - - //since this is on conversion from non-acid to acid, NEXT_WRITE_ID should not have an entry - //for this table. 
It also has a unique index in case 'should not' is violated - - // First allocation of write id should add the table to the next_write_id meta table - // The initial value for write id should be 1 and hence we add 1 with number of write ids - // allocated here - String s = "INSERT INTO \"NEXT_WRITE_ID\" (\"NWI_DATABASE\", \"NWI_TABLE\", \"NWI_NEXT\") VALUES (?, ?, " - + Long.toString(rqst.getSeedWriteId() + 1) + ")"; - pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, Arrays.asList(rqst.getDbName(), rqst.getTableName())); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute insert <" + s.replace("?", "{}") + ">", - quoteString(rqst.getDbName()), quoteString(rqst.getTableName())); - } - pst.execute(); - LOG.debug("Going to commit"); - dbConn.commit(); - } catch (SQLException e) { - rollbackDBConn(dbConn); - checkRetryable(e, "seedWriteId(" + rqst + ")"); - throw new MetaException("Unable to update transaction database " + StringUtils.stringifyException(e)); - } finally { - close(null, pst, dbConn); - } - } catch (RetryException e) { - seedWriteId(rqst); - } - } - - @Override - public void seedTxnId(SeedTxnIdRequest rqst) throws MetaException { - try { - Connection dbConn = null; - Statement stmt = null; - try { - lockInternal(); - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - stmt = dbConn.createStatement(); - /* - * Locking the txnLock an exclusive way, we do not want to set the txnId backward accidentally - * if there are concurrent open transactions - */ - acquireTxnLock(stmt, false); - long highWaterMark = getHighWaterMark(stmt); - if (highWaterMark >= rqst.getSeedTxnId()) { - throw new MetaException(MessageFormat - .format("Invalid txnId seed {}, the highWaterMark is {}", rqst.getSeedTxnId(), highWaterMark)); - } - TxnUtils.seedTxnSequence(dbConn, conf, stmt, rqst.getSeedTxnId()); - dbConn.commit(); - - } catch (SQLException e) { - rollbackDBConn(dbConn); - checkRetryable(e, "seedTxnId(" + rqst + ")"); - throw new MetaException("Unable to update transaction database " + StringUtils.stringifyException(e)); - } finally { - close(null, stmt, dbConn); - unlockInternal(); - } - } catch (RetryException e) { - seedTxnId(rqst); - } - } - - @Override - @RetrySemantics.Idempotent - public void addWriteNotificationLog(ListenerEvent acidWriteEvent) throws MetaException { - Connection dbConn = jdbcResource.getConnection(); - MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners, - acidWriteEvent instanceof AcidWriteEvent ? EventMessage.EventType.ACID_WRITE - : EventMessage.EventType.BATCH_ACID_WRITE, - acidWriteEvent, dbConn, sqlGenerator); - } - - @Override - @RetrySemantics.SafeToRetry - public void performWriteSetGC() throws MetaException { - Connection dbConn = null; - Statement stmt = null; - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - stmt = dbConn.createStatement(); - long commitHighWaterMark = getMinOpenTxnIdWaterMark(dbConn); - int delCnt = stmt.executeUpdate("DELETE FROM \"WRITE_SET\" WHERE \"WS_COMMIT_ID\" < " + commitHighWaterMark); - LOG.info("Deleted {} obsolete rows from WRITE_SET", delCnt); - dbConn.commit(); - } catch (SQLException ex) { - LOG.warn("WriteSet GC failed due to " + getMessage(ex), ex); - } finally { - close(null, stmt, dbConn); - } - } - - protected long getMinOpenTxnIdWaterMark(Connection dbConn) throws SQLException, MetaException { - /** - * We try to find the highest transactionId below everything was committed or aborted. 
- * For that we look for the lowest open transaction in the TXNS and the TxnMinTimeout boundary, - * because it is guaranteed there won't be open transactions below that. - */ - long minOpenTxn; - try (Statement stmt = dbConn.createStatement()) { - try (ResultSet rs = stmt - .executeQuery("SELECT MIN(\"TXN_ID\") FROM \"TXNS\" WHERE \"TXN_STATE\"=" + TxnStatus.OPEN)) { - if (!rs.next()) { - throw new IllegalStateException("Scalar query returned no rows?!?!!"); - } - minOpenTxn = rs.getLong(1); - if (rs.wasNull()) { - minOpenTxn = Long.MAX_VALUE; - } - } - } catch (SQLException e) { - throw new UncategorizedSQLException(null, null, e); - } - long lowWaterMark = getOpenTxnTimeoutLowBoundaryTxnId(dbConn); - LOG.debug("MinOpenTxnIdWaterMark calculated with minOpenTxn {}, lowWaterMark {}", minOpenTxn, lowWaterMark); - return Long.min(minOpenTxn, lowWaterMark + 1); - } - - @Override - public void updateTransactionStatistics(UpdateTransactionalStatsRequest req) throws MetaException { - String queryText = "UPDATE \"MV_TABLES_USED\" " + - "SET \"INSERTED_COUNT\"=\"INSERTED_COUNT\"+?" + - ",\"UPDATED_COUNT\"=\"UPDATED_COUNT\"+?" + - ",\"DELETED_COUNT\"=\"DELETED_COUNT\"+?" + - " WHERE \"TBL_ID\"=?"; - try (Connection dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED)) { - try (PreparedStatement pstmt = dbConn.prepareStatement(queryText)) { - pstmt.setLong(1, req.getInsertCount()); - pstmt.setLong(2, req.getUpdatedCount()); - pstmt.setLong(3, req.getDeletedCount()); - pstmt.setLong(4, req.getTableId()); - LOG.debug("Going to execute query <{}>", queryText); - int res = pstmt.executeUpdate(); - dbConn.commit(); - LOG.debug("Updated {} records tblId={}", res, req.getTableId()); - } - } catch (SQLException ex) { - LOG.warn("Unable to update transactional statistics tblId=" + req.getTableId(), ex); - throw new MetaException("Unable to update transactional statistics" + " " + StringUtils.stringifyException(ex)); - } - } - - /** - * Get invalidation info for the materialization. Materialization information - * contains information about whether there was update/delete/compaction operations on the source - * tables used by the materialization since it was created. - */ - @Override - @RetrySemantics.ReadOnly - public Materialization getMaterializationInvalidationInfo( - CreationMetadata creationMetadata, String validTxnListStr) throws MetaException { - if (creationMetadata.getTablesUsed().isEmpty()) { - // Bail out - LOG.warn("Materialization creation metadata does not contain any table"); - return null; - } - - // We are composing a query that returns a single row if an update happened after - // the materialization was created. Otherwise, query returns 0 rows. - - // Parse validReaderWriteIdList from creation metadata - MaterializationSnapshot mvSnapshot = MaterializationSnapshot.fromJson(creationMetadata.getValidTxnList()); - if (mvSnapshot.getTableSnapshots() != null && !mvSnapshot.getTableSnapshots().isEmpty()) { - // Incremental rebuild of MVs on Iceberg sources is not supported. 
- return null; - } - final ValidTxnWriteIdList validReaderWriteIdList = new ValidTxnWriteIdList(mvSnapshot.getValidTxnList()); - - // Parse validTxnList - final ValidReadTxnList currentValidTxnList = new ValidReadTxnList(validTxnListStr); - // Get the valid write id list for the tables in current state - final List currentTblValidWriteIdsList = new ArrayList<>(); - Connection dbConn = null; - for (String fullTableName : creationMetadata.getTablesUsed()) { - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - currentTblValidWriteIdsList.add(getValidWriteIdsForTable(dbConn, fullTableName, currentValidTxnList)); - } catch (SQLException ex) { - String errorMsg = "Unable to query Valid writeIds of table " + fullTableName; - LOG.warn(errorMsg, ex); - throw new MetaException(errorMsg + " " + StringUtils.stringifyException(ex)); - } finally { - closeDbConn(dbConn); - } - } - final ValidTxnWriteIdList currentValidReaderWriteIdList = TxnCommonUtils.createValidTxnWriteIdList( - currentValidTxnList.getHighWatermark(), currentTblValidWriteIdsList); - - List params = new ArrayList<>(); - StringBuilder queryUpdateDelete = new StringBuilder(); - StringBuilder queryCompletedCompactions = new StringBuilder(); - StringBuilder queryCompactionQueue = new StringBuilder(); - // compose a query that select transactions containing an update... - queryUpdateDelete.append("SELECT \"CTC_UPDATE_DELETE\" FROM \"COMPLETED_TXN_COMPONENTS\" WHERE \"CTC_UPDATE_DELETE\" ='Y' AND ("); - queryCompletedCompactions.append("SELECT 1 FROM \"COMPLETED_COMPACTIONS\" WHERE ("); - queryCompactionQueue.append("SELECT 1 FROM \"COMPACTION_QUEUE\" WHERE ("); - int i = 0; - for (String fullyQualifiedName : creationMetadata.getTablesUsed()) { - ValidWriteIdList tblValidWriteIdList = - validReaderWriteIdList.getTableValidWriteIdList(fullyQualifiedName); - if (tblValidWriteIdList == null) { - LOG.warn("ValidWriteIdList for table {} not present in creation metadata, this should not happen", fullyQualifiedName); - return null; - } - - // First, we check whether the low watermark has moved for any of the tables. - // If it has, we return true, since it is not incrementally refreshable, e.g., - // one of the commits that are not available may be an update/delete. - ValidWriteIdList currentTblValidWriteIdList = - currentValidReaderWriteIdList.getTableValidWriteIdList(fullyQualifiedName); - if (currentTblValidWriteIdList == null) { - LOG.warn("Current ValidWriteIdList for table {} not present in creation metadata, this should not happen", fullyQualifiedName); - return null; - } - if (!Objects.equals(currentTblValidWriteIdList.getMinOpenWriteId(), tblValidWriteIdList.getMinOpenWriteId())) { - LOG.debug("Minimum open write id do not match for table {}", fullyQualifiedName); - return null; - } - - // ...for each of the tables that are part of the materialized view, - // where the transaction had to be committed after the materialization was created... - if (i != 0) { - queryUpdateDelete.append("OR"); - queryCompletedCompactions.append("OR"); - queryCompactionQueue.append("OR"); - } - String[] names = TxnUtils.getDbTableName(fullyQualifiedName); - assert (names.length == 2); - queryUpdateDelete.append(" (\"CTC_DATABASE\"=? AND \"CTC_TABLE\"=?"); - queryCompletedCompactions.append(" (\"CC_DATABASE\"=? AND \"CC_TABLE\"=?"); - queryCompactionQueue.append(" (\"CQ_DATABASE\"=? 
AND \"CQ_TABLE\"=?"); - params.add(names[0]); - params.add(names[1]); - queryUpdateDelete.append(" AND (\"CTC_WRITEID\" > " + tblValidWriteIdList.getHighWatermark()); - queryCompletedCompactions.append(" AND (\"CC_HIGHEST_WRITE_ID\" > " + tblValidWriteIdList.getHighWatermark()); - queryUpdateDelete.append(tblValidWriteIdList.getInvalidWriteIds().length == 0 ? ") " : - " OR \"CTC_WRITEID\" IN(" + StringUtils.join(",", - Arrays.asList(ArrayUtils.toObject(tblValidWriteIdList.getInvalidWriteIds()))) + ") ) "); - queryCompletedCompactions.append(tblValidWriteIdList.getInvalidWriteIds().length == 0 ? ") " : - " OR \"CC_HIGHEST_WRITE_ID\" IN(" + StringUtils.join(",", - Arrays.asList(ArrayUtils.toObject(tblValidWriteIdList.getInvalidWriteIds()))) + ") ) "); - queryUpdateDelete.append(") "); - queryCompletedCompactions.append(") "); - queryCompactionQueue.append(") "); - i++; - } - // ... and where the transaction has already been committed as per snapshot taken - // when we are running current query - queryUpdateDelete.append(") AND \"CTC_TXNID\" <= " + currentValidTxnList.getHighWatermark()); - queryUpdateDelete.append(currentValidTxnList.getInvalidTransactions().length == 0 ? " " : - " AND \"CTC_TXNID\" NOT IN(" + StringUtils.join(",", - Arrays.asList(ArrayUtils.toObject(currentValidTxnList.getInvalidTransactions()))) + ") "); - queryCompletedCompactions.append(")"); - queryCompactionQueue.append(") "); - - boolean hasUpdateDelete = executeBoolean(queryUpdateDelete.toString(), params, - "Unable to retrieve materialization invalidation information: completed transaction components."); - - // Execute query - queryCompletedCompactions.append(" UNION "); - queryCompletedCompactions.append(queryCompactionQueue.toString()); - List paramsTwice = new ArrayList<>(params); - paramsTwice.addAll(params); - boolean hasCompaction = executeBoolean(queryCompletedCompactions.toString(), paramsTwice, - "Unable to retrieve materialization invalidation information: compactions"); - - return new Materialization(hasUpdateDelete, hasCompaction); - } - - private boolean executeBoolean(String queryText, List params, String errorMessage) throws MetaException { - Connection dbConn = null; - PreparedStatement pst = null; - ResultSet rs = null; - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - LOG.debug("Going to execute query <{}>", queryText); - pst = sqlGenerator.prepareStmtWithParameters(dbConn, queryText, params); - pst.setMaxRows(1); - rs = pst.executeQuery(); - - return rs.next(); - } catch (SQLException ex) { - LOG.warn(errorMessage, ex); - throw new MetaException(errorMessage + " " + StringUtils.stringifyException(ex)); - } finally { - close(rs, pst, dbConn); - } - } - - @Override - public LockResponse lockMaterializationRebuild(String dbName, String tableName, long txnId) - throws MetaException { - - if (LOG.isDebugEnabled()) { - LOG.debug("Acquiring lock for materialization rebuild with {} for {}", - JavaUtils.txnIdToString(txnId), TableName.getDbTable(dbName, tableName)); - } - - TxnStore.MutexAPI.LockHandle handle = null; - Connection dbConn = null; - PreparedStatement pst = null; - ResultSet rs = null; - try { - lockInternal(); - /** - * MUTEX_KEY.MaterializationRebuild lock ensures that there is only 1 entry in - * Initiated/Working state for any resource. This ensures we do not run concurrent - * rebuild operations on any materialization. 
- */ - handle = getMutexAPI().acquireLock(MUTEX_KEY.MaterializationRebuild.name()); - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - - List params = Arrays.asList(dbName, tableName); - String selectQ = "SELECT \"MRL_TXN_ID\" FROM \"MATERIALIZATION_REBUILD_LOCKS\" WHERE" + - " \"MRL_DB_NAME\" = ? AND \"MRL_TBL_NAME\" = ?"; - pst = sqlGenerator.prepareStmtWithParameters(dbConn, selectQ, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute query <" + selectQ.replace("?", "{}") + ">", - quoteString(dbName), quoteString(tableName)); - } - rs = pst.executeQuery(); - if(rs.next()) { - LOG.info("Ignoring request to rebuild {}/{} since it is already being rebuilt", dbName, tableName); - return new LockResponse(txnId, LockState.NOT_ACQUIRED); - } - String insertQ = "INSERT INTO \"MATERIALIZATION_REBUILD_LOCKS\" " + - "(\"MRL_TXN_ID\", \"MRL_DB_NAME\", \"MRL_TBL_NAME\", \"MRL_LAST_HEARTBEAT\") VALUES (" + txnId + - ", ?, ?, " + Instant.now().toEpochMilli() + ")"; - closeStmt(pst); - pst = sqlGenerator.prepareStmtWithParameters(dbConn, insertQ, params); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute update <" + insertQ.replace("?", "{}") + ">", - quoteString(dbName), quoteString(tableName)); - } - pst.executeUpdate(); - LOG.debug("Going to commit"); - dbConn.commit(); - return new LockResponse(txnId, LockState.ACQUIRED); - } catch (SQLException ex) { - LOG.warn("lockMaterializationRebuild failed due to " + getMessage(ex), ex); - throw new MetaException("Unable to retrieve materialization invalidation information due to " + - StringUtils.stringifyException(ex)); - } finally { - close(rs, pst, dbConn); - if(handle != null) { - handle.releaseLocks(); - } - unlockInternal(); - } - } - - @Override - public boolean heartbeatLockMaterializationRebuild(String dbName, String tableName, long txnId) - throws MetaException { - try { - Connection dbConn = null; - PreparedStatement pst = null; - try { - lockInternal(); - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - String s = "UPDATE \"MATERIALIZATION_REBUILD_LOCKS\"" + - " SET \"MRL_LAST_HEARTBEAT\" = " + Instant.now().toEpochMilli() + - " WHERE \"MRL_TXN_ID\" = " + txnId + - " AND \"MRL_DB_NAME\" = ?" 
+ - " AND \"MRL_TBL_NAME\" = ?"; - pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, Arrays.asList(dbName, tableName)); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute update <" + s.replace("?", "{}") + ">", - quoteString(dbName), quoteString(tableName)); - } - int rc = pst.executeUpdate(); - if (rc < 1) { - LOG.debug("Going to rollback"); - dbConn.rollback(); - LOG.info("No lock found for rebuild of {} when trying to heartbeat", TableName.getDbTable(dbName, tableName)); - // It could not be renewed, return that information - return false; - } - LOG.debug("Going to commit"); - dbConn.commit(); - // It could be renewed, return that information - return true; - } catch (SQLException e) { - LOG.debug("Going to rollback: ", e); - rollbackDBConn(dbConn); - checkRetryable(e, - "heartbeatLockMaterializationRebuild(" + TableName.getDbTable(dbName, tableName) + ", " + txnId + ")"); - throw new MetaException("Unable to heartbeat rebuild lock due to " + - StringUtils.stringifyException(e)); - } finally { - close(null, pst, dbConn); - unlockInternal(); - } - } catch (RetryException e) { - return heartbeatLockMaterializationRebuild(dbName, tableName ,txnId); - } - } - - @Override - public long cleanupMaterializationRebuildLocks(ValidTxnList validTxnList, long timeout) throws MetaException { - try { - // Aux values - long cnt = 0L; - List txnIds = new ArrayList<>(); - long timeoutTime = Instant.now().toEpochMilli() - timeout; - - Connection dbConn = null; - Statement stmt = null; - ResultSet rs = null; - try { - lockInternal(); - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - stmt = dbConn.createStatement(); - - String selectQ = "SELECT \"MRL_TXN_ID\", \"MRL_LAST_HEARTBEAT\" FROM \"MATERIALIZATION_REBUILD_LOCKS\""; - LOG.debug("Going to execute query <{}>", selectQ); - rs = stmt.executeQuery(selectQ); - while(rs.next()) { - long lastHeartbeat = rs.getLong(2); - if (lastHeartbeat < timeoutTime) { - // The heartbeat has timeout, double check whether we can remove it - long txnId = rs.getLong(1); - if (validTxnList.isTxnValid(txnId) || validTxnList.isTxnAborted(txnId)) { - // Txn was committed (but notification was not received) or it was aborted. - // Either case, we can clean it up - txnIds.add(txnId); - } - } - } - if (!txnIds.isEmpty()) { - String deleteQ = "DELETE FROM \"MATERIALIZATION_REBUILD_LOCKS\" WHERE" + - " \"MRL_TXN_ID\" IN(" + StringUtils.join(",", txnIds) + ") "; - LOG.debug("Going to execute update <{}>", deleteQ); - cnt = stmt.executeUpdate(deleteQ); - } - LOG.debug("Going to commit"); - dbConn.commit(); - return cnt; - } catch (SQLException e) { - LOG.debug("Going to rollback: ", e); - rollbackDBConn(dbConn); - checkRetryable(e, "cleanupMaterializationRebuildLocks"); - throw new MetaException("Unable to clean rebuild locks due to " + - StringUtils.stringifyException(e)); - } finally { - close(rs, stmt, dbConn); - unlockInternal(); - } - } catch (RetryException e) { - return cleanupMaterializationRebuildLocks(validTxnList, timeout); - } - } - - /** - * As much as possible (i.e. in absence of retries) we want both operations to be done on the same - * connection (but separate transactions). - * - * Retry-by-caller note: If the call to lock is from a transaction, then in the worst case - * there will be a duplicate set of locks but both sets will belong to the same txn so they - * will not conflict with each other. For locks w/o txn context (i.e. read-only query), this - * may lead to deadlock (at least a long wait). (e.g. 
1st call creates locks in {@code LOCK_WAITING} - * mode and response gets lost. Then {@link org.apache.hadoop.hive.metastore.RetryingMetaStoreClient} - * retries, and enqueues another set of locks in LOCK_WAITING. The 2nd LockResponse is delivered - * to the DbLockManager, which will keep dong {@link #checkLock(CheckLockRequest)} until the 1st - * set of locks times out. - */ - @RetrySemantics.CannotRetry - public LockResponse lock(LockRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException { - ConnectionLockIdPair connAndLockId = enqueueLockWithRetry(rqst); - try { - return checkLockWithRetry(connAndLockId.dbConn, connAndLockId.extLockId, rqst.getTxnid(), - rqst.isZeroWaitReadEnabled(), rqst.isExclusiveCTAS()); - } - catch(NoSuchLockException e) { - // This should never happen, as we just added the lock id - throw new MetaException("Couldn't find a lock we just created! " + e.getMessage()); - } - } - private static final class ConnectionLockIdPair { - private final Connection dbConn; - private final long extLockId; - private ConnectionLockIdPair(Connection dbConn, long extLockId) { - this.dbConn = dbConn; - this.extLockId = extLockId; - } - } - - /** - * Note that by definition select for update is divorced from update, i.e. you executeQuery() to read - * and then executeUpdate(). One other alternative would be to actually update the row in TXNS but - * to the same value as before thus forcing db to acquire write lock for duration of the transaction. - * - * SELECT ... FOR UPDATE locks the row until the transaction commits or rolls back. - * Second connection using `SELECT ... FOR UPDATE` will suspend until the lock is released. - * @return the txnType wrapped in an {@link Optional} - * @throws SQLException - * @throws MetaException - */ - private TxnType getOpenTxnTypeAndLock(Statement stmt, long txnId) throws SQLException, MetaException { - String query = "SELECT \"TXN_TYPE\" FROM \"TXNS\" WHERE \"TXN_ID\" = " + txnId - + " AND \"TXN_STATE\" = " + TxnStatus.OPEN; - try (ResultSet rs = stmt.executeQuery(sqlGenerator.addForUpdateClause(query))) { - return rs.next() ? TxnType.findByValue(rs.getInt(1)) : null; - } - } - - /** - * This enters locks into the queue in {@link #LOCK_WAITING} mode. - * - * Isolation Level Notes: - * 1. We use S4U (withe read_committed) to generate the next (ext) lock id. This serializes - * any 2 {@code enqueueLockWithRetry()} calls. - * 2. We use S4U on the relevant TXNS row to block any concurrent abort/commit/etc operations - * @see #checkLockWithRetry(Connection, long, long, boolean, boolean) - */ - private ConnectionLockIdPair enqueueLockWithRetry(LockRequest rqst) - throws NoSuchTxnException, TxnAbortedException, MetaException { - boolean success = false; - Connection dbConn = null; - try { - Statement stmt = null; - try { - lockInternal(); - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - long txnid = rqst.getTxnid(); - stmt = dbConn.createStatement(); - if (isValidTxn(txnid)) { - //this also ensures that txn is still there in expected state - TxnType txnType = getOpenTxnTypeAndLock(stmt, txnid); - if (txnType == null) { - ensureValidTxn(dbConn, txnid, stmt); - shouldNeverHappen(txnid); - } - } - /* Insert txn components and hive locks (with a temp extLockId) first, before getting the next lock ID in a select-for-update. - This should minimize the scope of the S4U and decrease the table lock duration. 
*/ - insertTxnComponents(txnid, rqst, dbConn); - long tempExtLockId = insertHiveLocksWithTemporaryExtLockId(txnid, dbConn, rqst); - - /** Get the next lock id. - * This has to be atomic with adding entries to HIVE_LOCK entries (1st add in W state) to prevent a race. - * Suppose ID gen is a separate txn and 2 concurrent lock() methods are running. 1st one generates nl_next=7, - * 2nd nl_next=8. Then 8 goes first to insert into HIVE_LOCKS and acquires the locks. Then 7 unblocks, - * and add it's W locks but it won't see locks from 8 since to be 'fair' {@link #checkLock(java.sql.Connection, long)} - * doesn't block on locks acquired later than one it's checking*/ - long extLockId = getNextLockIdForUpdate(dbConn, stmt); - incrementLockIdAndUpdateHiveLocks(stmt, extLockId, tempExtLockId); - - dbConn.commit(); - success = true; - return new ConnectionLockIdPair(dbConn, extLockId); - } catch (SQLException e) { - LOG.error("enqueueLock failed for request: {}. Exception msg: {}", rqst, getMessage(e)); - rollbackDBConn(dbConn); - checkRetryable(e, "enqueueLockWithRetry(" + rqst + ")"); - throw new MetaException("Unable to update transaction database " + - StringUtils.stringifyException(e)); - } finally { - closeStmt(stmt); - if (!success) { - /* This needs to return a "live" connection to be used by operation that follows it. - Thus it only closes Connection on failure/retry. */ - closeDbConn(dbConn); - } - unlockInternal(); - } - } - catch(RetryException e) { - LOG.debug("Going to retry enqueueLock for request: {}, after catching RetryException with message: {}", - rqst, e.getMessage()); - return enqueueLockWithRetry(rqst); - } - } - - private long getNextLockIdForUpdate(Connection dbConn, Statement stmt) throws SQLException, MetaException { - String s = sqlGenerator.addForUpdateClause("SELECT \"NL_NEXT\" FROM \"NEXT_LOCK_ID\""); - LOG.debug("Going to execute query <{}>", s); - try (ResultSet rs = stmt.executeQuery(s)) { - if (!rs.next()) { - LOG.error("Failure to get next lock ID for update! 
SELECT query returned empty ResultSet."); - dbConn.rollback(); - throw new MetaException("Transaction tables not properly " + - "initialized, no record found in next_lock_id"); - } - return rs.getLong(1); - } - } - - private void incrementLockIdAndUpdateHiveLocks(Statement stmt, long extLockId, long tempId) throws SQLException { - String incrCmd = String.format(INCREMENT_NEXT_LOCK_ID_QUERY, (extLockId + 1)); - // update hive locks entries with the real EXT_LOCK_ID (replace temp ID) - String updateLocksCmd = String.format(UPDATE_HIVE_LOCKS_EXT_ID_QUERY, extLockId, tempId); - LOG.debug("Going to execute updates in batch: <{}>, and <{}>", incrCmd, updateLocksCmd); - stmt.addBatch(incrCmd); - stmt.addBatch(updateLocksCmd); - stmt.executeBatch(); - } - - private void insertTxnComponents(long txnid, LockRequest rqst, Connection dbConn) throws SQLException { - if (txnid > 0) { - Map, Optional> writeIdCache = new HashMap<>(); - try (PreparedStatement pstmt = dbConn.prepareStatement(TXN_COMPONENTS_INSERT_QUERY)) { - // For each component in this lock request, - // add an entry to the txn_components table - int insertCounter = 0; - - Predicate isDynPart = lc -> lc.isSetIsDynamicPartitionWrite() && lc.isIsDynamicPartitionWrite(); - Function> groupKey = lc -> - Pair.of(normalizeCase(lc.getDbname()), normalizeCase(lc.getTablename())); - - Set> isDynPartUpdate = rqst.getComponent().stream().filter(isDynPart) - .filter(lc -> lc.getOperationType() == DataOperationType.UPDATE || lc.getOperationType() == DataOperationType.DELETE) - .map(groupKey) - .collect(Collectors.toSet()); - - for (LockComponent lc : rqst.getComponent()) { - if (lc.isSetIsTransactional() && !lc.isIsTransactional()) { - //we don't prevent using non-acid resources in a txn but we do lock them - continue; - } - if (!shouldUpdateTxnComponent(txnid, rqst, lc)) { - continue; - } - String dbName = normalizeCase(lc.getDbname()); - String tblName = normalizeCase(lc.getTablename()); - String partName = normalizePartitionCase(lc.getPartitionname()); - OperationType opType = OperationType.fromDataOperationType(lc.getOperationType()); - - if (isDynPart.test(lc)) { - partName = null; - if (writeIdCache.containsKey(groupKey.apply(lc))) { - continue; - } - opType = isDynPartUpdate.contains(groupKey.apply(lc)) ? OperationType.UPDATE : OperationType.INSERT; - } - Optional writeId = getWriteId(writeIdCache, dbName, tblName, txnid, dbConn); - - pstmt.setLong(1, txnid); - pstmt.setString(2, dbName); - pstmt.setString(3, tblName); - pstmt.setString(4, partName); - pstmt.setString(5, opType.getSqlConst()); - pstmt.setObject(6, writeId.orElse(null)); - - pstmt.addBatch(); - insertCounter++; - if (insertCounter % maxBatchSize == 0) { - LOG.debug("Executing a batch of <{}> queries. Batch size: {}", TXN_COMPONENTS_INSERT_QUERY, maxBatchSize); - pstmt.executeBatch(); - } - } - if (insertCounter % maxBatchSize != 0) { - LOG.debug("Executing a batch of <{}> queries. 
Batch size: {}", TXN_COMPONENTS_INSERT_QUERY, - insertCounter % maxBatchSize); - pstmt.executeBatch(); - } - } - } - } - - private Optional getWriteId(Map, Optional> writeIdCache, String dbName, String tblName, long txnid, Connection dbConn) throws SQLException { - /* we can cache writeIDs based on dbName and tblName because txnid is invariant and - partitionName is not part of the writeID select query */ - Pair dbAndTable = Pair.of(dbName, tblName); - if (writeIdCache.containsKey(dbAndTable)) { - return writeIdCache.get(dbAndTable); - } else { - Optional writeId = getWriteIdFromDb(txnid, dbConn, dbName, tblName); - writeIdCache.put(dbAndTable, writeId); - return writeId; - } - } - - private Optional getWriteIdFromDb(long txnid, Connection dbConn, String dbName, String tblName) throws SQLException { - if (tblName != null) { - // It is assumed the caller have already allocated write id for adding/updating data to - // the acid tables. However, DDL operatons won't allocate write id and hence this query - // may return empty result sets. - // Get the write id allocated by this txn for the given table writes - try (PreparedStatement pstmt = dbConn.prepareStatement(SELECT_WRITE_ID_QUERY)) { - pstmt.setString(1, dbName); - pstmt.setString(2, tblName); - pstmt.setLong(3, txnid); - LOG.debug("Going to execute query <{}>", SELECT_WRITE_ID_QUERY); - try (ResultSet rs = pstmt.executeQuery()) { - if (rs.next()) { - return Optional.of(rs.getLong(1)); - } - } - } - } - return Optional.empty(); - } - - private boolean shouldUpdateTxnComponent(long txnid, LockRequest rqst, LockComponent lc) { - if(!lc.isSetOperationType()) { - //request came from old version of the client - return true; //this matches old behavior - } - else { - switch (lc.getOperationType()) { - case INSERT: - case UPDATE: - case DELETE: - return true; - case SELECT: - return false; - case NO_TXN: - /*this constant is a bit of a misnomer since we now always have a txn context. It - just means the operation is such that we don't care what tables/partitions it - affected as it doesn't trigger a compaction or conflict detection. A better name - would be NON_TRANSACTIONAL.*/ - return false; - default: - //since we have an open transaction, only 4 values above are expected - throw new IllegalStateException("Unexpected DataOperationType: " + lc.getOperationType() - + " agentInfo=" + rqst.getAgentInfo() + " " + JavaUtils.txnIdToString(txnid)); - } - } - } - - private long insertHiveLocksWithTemporaryExtLockId(long txnid, Connection dbConn, LockRequest rqst) throws MetaException, SQLException { - - String lastHB = isValidTxn(txnid) ? 
"0" : getEpochFn(dbProduct); - String insertLocksQuery = String.format(HIVE_LOCKS_INSERT_QRY, lastHB); - long intLockId = 0; - long tempExtLockId = generateTemporaryId(); - - try (PreparedStatement pstmt = dbConn.prepareStatement(insertLocksQuery)) { - for (LockComponent lc : rqst.getComponent()) { - intLockId++; - String lockType = LockTypeUtil.getEncodingAsStr(lc.getType()); - - pstmt.setLong(1, tempExtLockId); - pstmt.setLong(2, intLockId); - pstmt.setLong(3, txnid); - pstmt.setString(4, normalizeCase(lc.getDbname())); - pstmt.setString(5, normalizeCase(lc.getTablename())); - pstmt.setString(6, normalizePartitionCase(lc.getPartitionname())); - pstmt.setString(7, Character.toString(LOCK_WAITING)); - pstmt.setString(8, lockType); - pstmt.setString(9, rqst.getUser()); - pstmt.setString(10, rqst.getHostname()); - pstmt.setString(11, rqst.getAgentInfo()); - - pstmt.addBatch(); - if (intLockId % maxBatchSize == 0) { - LOG.debug("Executing a batch of <{}> queries. Batch size: {}", insertLocksQuery, maxBatchSize); - pstmt.executeBatch(); - } - } - if (intLockId % maxBatchSize != 0) { - LOG.debug("Executing a batch of <{}> queries. Batch size: {}", insertLocksQuery, intLockId % maxBatchSize); - pstmt.executeBatch(); - } - } - return tempExtLockId; - } - - private long generateTemporaryId() { - return -1 * ThreadLocalRandom.current().nextLong(); - } - - private static String normalizeCase(String s) { - return s == null ? null : s.toLowerCase(); - } - - private static String normalizePartitionCase(String s) { - if (s == null) { - return null; - } - Map map = Splitter.on(Path.SEPARATOR).withKeyValueSeparator('=').split(s); - return FileUtils.makePartName(new ArrayList<>(map.keySet()), new ArrayList<>(map.values())); - } - - private LockResponse checkLockWithRetry(Connection dbConn, long extLockId, long txnId, boolean zeroWaitReadEnabled, - boolean isExclusiveCTAS) - throws NoSuchLockException, TxnAbortedException, MetaException { - try { - try { - lockInternal(); - if(dbConn.isClosed()) { - //should only get here if retrying this op - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - } - return checkLock(dbConn, extLockId, txnId, zeroWaitReadEnabled, isExclusiveCTAS); - } catch (SQLException e) { - LOG.error("checkLock failed for extLockId={}/txnId={}. Exception msg: {}", extLockId, txnId, getMessage(e)); - rollbackDBConn(dbConn); - checkRetryable(e, "checkLockWithRetry(" + extLockId + "," + txnId + ")"); - throw new MetaException("Unable to update transaction database " + - StringUtils.stringifyException(e)); - } finally { - unlockInternal(); - closeDbConn(dbConn); - } - } - catch(RetryException e) { - LOG.debug("Going to retry checkLock for extLockId={}/txnId={} after catching RetryException with message: {}", - extLockId, txnId, e.getMessage()); - return checkLockWithRetry(dbConn, extLockId, txnId, zeroWaitReadEnabled, isExclusiveCTAS); - } - } - /** - * Why doesn't this get a txnid as parameter? The caller should either know the txnid or know there isn't one. - * Either way getTxnIdFromLockId() will not be needed. This would be a Thrift change. - * - * Also, when lock acquisition returns WAITING, it's retried every 15 seconds (best case, see DbLockManager.backoff(), - * in practice more often) - * which means this is heartbeating way more often than hive.txn.timeout and creating extra load on DB. - * - * The clients that operate in blocking mode, can't heartbeat a lock until the lock is acquired. 
- * We should make CheckLockRequest include timestamp or last request to skip unnecessary heartbeats. Thrift change. - * - * {@link #checkLock(java.sql.Connection, long, long, boolean, boolean)} must run at SERIALIZABLE - * (make sure some lock we are checking against doesn't move from W to A in another txn) - * but this method can heartbeat in separate txn at READ_COMMITTED. - * - * Retry-by-caller note: - * Retryable because {@link #checkLock(Connection, long, long, boolean, boolean)} is - */ - @Override - @RetrySemantics.SafeToRetry - public LockResponse checkLock(CheckLockRequest rqst) - throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException { - try { - Connection dbConn = null; - long extLockId = rqst.getLockid(); - try { - lockInternal(); - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - // Heartbeat on the lockid first, to assure that our lock is still valid. - // Then look up the lock info (hopefully in the cache). If these locks - // are associated with a transaction then heartbeat on that as well. - LockInfo lockInfo = getLockFromLockId(dbConn, extLockId) - .orElseThrow(() -> new NoSuchLockException("No such lock " + JavaUtils.lockIdToString(extLockId))); - if (lockInfo.txnId > 0) { - heartbeatTxn(dbConn, lockInfo.txnId); - } - else { - heartbeatLock(dbConn, extLockId); - } - //todo: strictly speaking there is a bug here. heartbeat*() commits but both heartbeat and - //checkLock() are in the same retry block, so if checkLock() throws, heartbeat is also retired - //extra heartbeat is logically harmless, but ... - return checkLock(dbConn, extLockId, lockInfo.txnId, false, false); - } catch (SQLException e) { - LOG.error("checkLock failed for request={}. Exception msg: {}", rqst, getMessage(e)); - rollbackDBConn(dbConn); - checkRetryable(e, "checkLock(" + rqst + " )"); - throw new MetaException("Unable to update transaction database " + - JavaUtils.lockIdToString(extLockId) + " " + StringUtils.stringifyException(e)); - } finally { - closeDbConn(dbConn); - unlockInternal(); - } - } catch (RetryException e) { - LOG.debug("Going to retry checkLock for request={} after catching RetryException with message: {}", - rqst, e.getMessage()); - return checkLock(rqst); - } - - } - - /** - * This would have been made simpler if all locks were associated with a txn. Then only txn needs to - * be heartbeated, committed, etc. no need for client to track individual locks. - * When removing locks not associated with txn this potentially conflicts with - * heartbeat/performTimeout which are update/delete of HIVE_LOCKS thus will be locked as needed by db. - * since this only removes from HIVE_LOCKS at worst some lock acquire is delayed - */ - @RetrySemantics.Idempotent - public void unlock(UnlockRequest rqst) throws TxnOpenException, MetaException { - try { - Connection dbConn = null; - Statement stmt = null; - long extLockId = rqst.getLockid(); - try { - /** - * This method is logically like commit for read-only auto commit queries. - * READ_COMMITTED since this only has 1 delete statement and no new entries with the - * same hl_lock_ext_id can be added, i.e. all rows with a given hl_lock_ext_id are - * created in a single atomic operation. - * Theoretically, this competes with {@link #lock(org.apache.hadoop.hive.metastore.api.LockRequest)} - * but hl_lock_ext_id is not known until that method returns. 
- * Also competes with {@link #checkLock(org.apache.hadoop.hive.metastore.api.CheckLockRequest)} - * but using SERIALIZABLE doesn't materially change the interaction. - * If "delete" stmt misses, additional logic is best effort to produce meaningful error msg. - */ - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - stmt = dbConn.createStatement(); - //hl_txnid <> 0 means it's associated with a transaction - String s = "DELETE FROM \"HIVE_LOCKS\" WHERE \"HL_LOCK_EXT_ID\" = " + extLockId + " AND (\"HL_TXNID\" = 0 OR" + - " (\"HL_TXNID\" <> 0 AND \"HL_LOCK_STATE\" = '" + LOCK_WAITING + "'))"; - //(hl_txnid <> 0 AND hl_lock_state = '" + LOCK_WAITING + "') is for multi-statement txns where - //some query attempted to lock (thus LOCK_WAITING state) but is giving up due to timeout for example - LOG.debug("Going to execute update <{}>", s); - int rc = stmt.executeUpdate(s); - if (rc < 1) { - LOG.info("Failure to unlock any locks with extLockId={}.", extLockId); - dbConn.rollback(); - Optional optLockInfo = getLockFromLockId(dbConn, extLockId); - if (!optLockInfo.isPresent()) { - //didn't find any lock with extLockId but at ReadCommitted there is a possibility that - //it existed when above delete ran but it didn't have the expected state. - LOG.info("No lock in {} mode found for unlock({})", LOCK_WAITING, - JavaUtils.lockIdToString(rqst.getLockid())); - //bail here to make the operation idempotent - return; - } - LockInfo lockInfo = optLockInfo.get(); - if (isValidTxn(lockInfo.txnId)) { - String msg = "Unlocking locks associated with transaction not permitted. " + lockInfo; - //if a lock is associated with a txn we can only "unlock" if if it's in WAITING state - // which really means that the caller wants to give up waiting for the lock - LOG.error(msg); - throw new TxnOpenException(msg); - } else { - //we didn't see this lock when running DELETE stmt above but now it showed up - //so should "should never happen" happened... - String msg = "Found lock in unexpected state " + lockInfo; - LOG.error(msg); - throw new MetaException(msg); - } - } - LOG.debug("Successfully unlocked at least 1 lock with extLockId={}", extLockId); - dbConn.commit(); - } catch (SQLException e) { - LOG.error("Unlock failed for request={}. 
Exception msg: {}", rqst, getMessage(e)); - rollbackDBConn(dbConn); - checkRetryable(e, "unlock(" + rqst + ")"); - throw new MetaException("Unable to update transaction database " + - JavaUtils.lockIdToString(extLockId) + " " + StringUtils.stringifyException(e)); - } finally { - closeStmt(stmt); - closeDbConn(dbConn); - } - } catch (RetryException e) { - unlock(rqst); - } - } - - /** - * used to sort entries in {@link org.apache.hadoop.hive.metastore.api.ShowLocksResponse} - */ - private static class LockInfoExt extends LockInfo { - private final ShowLocksResponseElement e; - LockInfoExt(ShowLocksResponseElement e) { - super(e); - this.e = e; - } - } - @RetrySemantics.ReadOnly - public ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException { - try { - Connection dbConn = null; - ShowLocksResponse rsp = new ShowLocksResponse(); - List elems = new ArrayList<>(); - List sortedList = new ArrayList<>(); - PreparedStatement pst = null; - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - - String s = "SELECT \"HL_LOCK_EXT_ID\", \"HL_TXNID\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", \"HL_LOCK_STATE\", " + - "\"HL_LOCK_TYPE\", \"HL_LAST_HEARTBEAT\", \"HL_ACQUIRED_AT\", \"HL_USER\", \"HL_HOST\", \"HL_LOCK_INT_ID\"," + - "\"HL_BLOCKEDBY_EXT_ID\", \"HL_BLOCKEDBY_INT_ID\", \"HL_AGENT_INFO\" FROM \"HIVE_LOCKS\""; - - // Some filters may have been specified in the SHOW LOCKS statement. Add them to the query. - String dbName = rqst.getDbname(); - String tableName = rqst.getTablename(); - String partName = rqst.getPartname(); - List params = new ArrayList<>(); - - StringBuilder filter = new StringBuilder(); - if (dbName != null && !dbName.isEmpty()) { - filter.append("\"HL_DB\"=?"); - params.add(dbName); - } - if (tableName != null && !tableName.isEmpty()) { - if (filter.length() > 0) { - filter.append(" and "); - } - filter.append("\"HL_TABLE\"=?"); - params.add(tableName); - } - if (partName != null && !partName.isEmpty()) { - if (filter.length() > 0) { - filter.append(" and "); - } - filter.append("\"HL_PARTITION\"=?"); - params.add(partName); - } - if (rqst.isSetTxnid()) { - if (filter.length() > 0) { - filter.append(" and "); - } - filter.append("\"HL_TXNID\"=" + rqst.getTxnid()); - } - String whereClause = filter.toString(); - - if (!whereClause.isEmpty()) { - s = s + " where " + whereClause; - } - - pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, params); - LOG.debug("Going to execute query <{}>", s); - ResultSet rs = pst.executeQuery(); - while (rs.next()) { - ShowLocksResponseElement e = new ShowLocksResponseElement(); - e.setLockid(rs.getLong(1)); - long txnid = rs.getLong(2); - if (!rs.wasNull()) e.setTxnid(txnid); - e.setDbname(rs.getString(3)); - e.setTablename(rs.getString(4)); - String partition = rs.getString(5); - if (partition != null) e.setPartname(partition); - switch (rs.getString(6).charAt(0)) { - case LOCK_ACQUIRED: e.setState(LockState.ACQUIRED); break; - case LOCK_WAITING: e.setState(LockState.WAITING); break; - default: throw new MetaException("Unknown lock state " + rs.getString(6).charAt(0)); - } - - char lockChar = rs.getString(7).charAt(0); - LockType lockType = LockTypeUtil.getLockTypeFromEncoding(lockChar) - .orElseThrow(() -> new MetaException("Unknown lock type: " + lockChar)); - e.setType(lockType); - - e.setLastheartbeat(rs.getLong(8)); - long acquiredAt = rs.getLong(9); - if (!rs.wasNull()) e.setAcquiredat(acquiredAt); - e.setUser(rs.getString(10)); - e.setHostname(rs.getString(11)); - e.setLockIdInternal(rs.getLong(12)); - 
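The SHOW LOCKS query above is assembled by appending optional "column = ?" fragments to a filter and collecting the bound values in a separate list, so nothing user-supplied is ever concatenated into the SQL text. A minimal standalone sketch of that technique follows; the column names mirror HIVE_LOCKS, but the helper itself is illustrative rather than the metastore's sqlGenerator API.

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

class LockFilterSketch {
  static PreparedStatement buildShowLocksQuery(Connection conn, String db, String table) throws SQLException {
    StringBuilder sql = new StringBuilder("SELECT \"HL_LOCK_EXT_ID\" FROM \"HIVE_LOCKS\"");
    StringBuilder filter = new StringBuilder();
    List<String> params = new ArrayList<>();
    if (db != null && !db.isEmpty()) {
      filter.append("\"HL_DB\"=?");
      params.add(db);
    }
    if (table != null && !table.isEmpty()) {
      if (filter.length() > 0) {
        filter.append(" AND ");
      }
      filter.append("\"HL_TABLE\"=?");
      params.add(table);
    }
    if (filter.length() > 0) {
      sql.append(" WHERE ").append(filter);
    }
    PreparedStatement pst = conn.prepareStatement(sql.toString());
    for (int i = 0; i < params.size(); i++) {
      pst.setString(i + 1, params.get(i));   // JDBC parameter indexes are 1-based
    }
    return pst;
  }
}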
long id = rs.getLong(13); - if(!rs.wasNull()) { - e.setBlockedByExtId(id); - } - id = rs.getLong(14); - if(!rs.wasNull()) { - e.setBlockedByIntId(id); - } - e.setAgentInfo(rs.getString(15)); - sortedList.add(new LockInfoExt(e)); - } - } catch (SQLException e) { - checkRetryable(e, "showLocks(" + rqst + ")"); - throw new MetaException("Unable to select from transaction database " + - StringUtils.stringifyException(e)); - } finally { - closeStmt(pst); - closeDbConn(dbConn); - } - //this ensures that "SHOW LOCKS" prints the locks in the same order as they are examined - //by checkLock() - makes diagnostics easier. - Collections.sort(sortedList, new LockInfoComparator()); - for(LockInfoExt lockInfoExt : sortedList) { - elems.add(lockInfoExt.e); - } - rsp.setLocks(elems); - return rsp; - } catch (RetryException e) { - return showLocks(rqst); - } - } - - /** - * {@code ids} should only have txnid or lockid but not both, ideally. - * Currently DBTxnManager.heartbeat() enforces this. - */ - @Override - @RetrySemantics.SafeToRetry - public void heartbeat(HeartbeatRequest ids) - throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException { - try { - Connection dbConn = null; - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - heartbeatLock(dbConn, ids.getLockid()); - heartbeatTxn(dbConn, ids.getTxnid()); - } catch (SQLException e) { - LOG.debug("Going to rollback: ", e); - rollbackDBConn(dbConn); - checkRetryable(e, "heartbeat(" + ids + ")"); - throw new MetaException("Unable to select from transaction database " + - StringUtils.stringifyException(e)); - } finally { - closeDbConn(dbConn); - } - } catch (RetryException e) { - heartbeat(ids); - } - } - @Override - @RetrySemantics.SafeToRetry - public HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst) - throws MetaException { - try { - Connection dbConn = null; - Statement stmt = null; - HeartbeatTxnRangeResponse rsp = new HeartbeatTxnRangeResponse(); - Set nosuch = new HashSet<>(); - Set aborted = new HashSet<>(); - rsp.setNosuch(nosuch); - rsp.setAborted(aborted); - try { - /** - * READ_COMMITTED is sufficient since {@link #heartbeatTxn(java.sql.Connection, long)} - * only has 1 update statement in it and - * we only update existing txns, i.e. nothing can add additional txns that this operation - * would care about (which would have required SERIALIZABLE) - */ - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - /*do fast path first (in 1 statement) if doesn't work, rollback and do the long version*/ - stmt = dbConn.createStatement(); - List queries = new ArrayList<>(); - int numTxnsToHeartbeat = (int) (rqst.getMax() - rqst.getMin() + 1); - List txnIds = new ArrayList<>(numTxnsToHeartbeat); - for (long txn = rqst.getMin(); txn <= rqst.getMax(); txn++) { - txnIds.add(txn); - } - TxnUtils.buildQueryWithINClause(conf, queries, - new StringBuilder("UPDATE \"TXNS\" SET \"TXN_LAST_HEARTBEAT\" = " + getEpochFn(dbProduct) + - " WHERE \"TXN_STATE\" = " + TxnStatus.OPEN + " AND "), - new StringBuilder(""), txnIds, "\"TXN_ID\"", true, false); - int updateCnt = 0; - for (String query : queries) { - LOG.debug("Going to execute update <{}>", query); - updateCnt += stmt.executeUpdate(query); - } - if (updateCnt == numTxnsToHeartbeat) { - //fast pass worked, i.e. 
all txns we were asked to heartbeat were Open as expected - dbConn.commit(); - return rsp; - } - //if here, do the slow path so that we can return info txns which were not in expected state - dbConn.rollback(); - for (long txn = rqst.getMin(); txn <= rqst.getMax(); txn++) { - try { - heartbeatTxn(dbConn, txn); - } catch (NoSuchTxnException e) { - nosuch.add(txn); - } catch (TxnAbortedException e) { - aborted.add(txn); - } - } - return rsp; - } catch (SQLException e) { - LOG.debug("Going to rollback: ", e); - rollbackDBConn(dbConn); - checkRetryable(e, "heartbeatTxnRange(" + rqst + ")"); - throw new MetaException("Unable to select from transaction database " + - StringUtils.stringifyException(e)); - } finally { - close(null, stmt, dbConn); - } - } catch (RetryException e) { - return heartbeatTxnRange(rqst); - } - } - - @Deprecated - long generateCompactionQueueId(Statement stmt) throws SQLException, MetaException { - // Get the id for the next entry in the queue - String s = sqlGenerator.addForUpdateClause("SELECT \"NCQ_NEXT\" FROM \"NEXT_COMPACTION_QUEUE_ID\""); - LOG.debug("going to execute query <{}>", s); - try (ResultSet rs = stmt.executeQuery(s)) { - if (!rs.next()) { - throw new IllegalStateException("Transaction tables not properly initiated, " - + "no record found in next_compaction_queue_id"); - } - long id = rs.getLong(1); - s = "UPDATE \"NEXT_COMPACTION_QUEUE_ID\" SET \"NCQ_NEXT\" = " + (id + 1) + " WHERE \"NCQ_NEXT\" = " + id; - LOG.debug("Going to execute update <{}>", s); - if (stmt.executeUpdate(s) != 1) { - //TODO: Eliminate this id generation by implementing: https://issues.apache.org/jira/browse/HIVE-27121 - LOG.info("The returned compaction ID ({}) already taken, obtaining new", id); - return generateCompactionQueueId(stmt); - } - return id; - } - } - - long generateCompactionQueueId() throws MetaException { - // Get the id for the next entry in the queue - String sql = sqlGenerator.addForUpdateClause("SELECT \"NCQ_NEXT\" FROM \"NEXT_COMPACTION_QUEUE_ID\""); - LOG.debug("going to execute SQL <{}>", sql); - - Long allocatedId = jdbcResource.getJdbcTemplate().query(sql, rs -> { - if (!rs.next()) { - throw new IllegalStateException("Transaction tables not properly initiated, " - + "no record found in next_compaction_queue_id"); - } - long id = rs.getLong(1); - - int count = jdbcResource.getJdbcTemplate().update("UPDATE \"NEXT_COMPACTION_QUEUE_ID\" SET \"NCQ_NEXT\" = :newId WHERE \"NCQ_NEXT\" = :id", - new MapSqlParameterSource() - .addValue("id", id) - .addValue("newId", id + 1)); - - if (count != 1) { - //TODO: Eliminate this id generation by implementing: https://issues.apache.org/jira/browse/HIVE-27121 - LOG.info("The returned compaction ID ({}) already taken, obtaining new", id); - return null; - } - return id; - }); - if (allocatedId == null) { - return generateCompactionQueueId(); - } else { - return allocatedId; - } - } - - - @Override - @RetrySemantics.ReadOnly - public long getTxnIdForWriteId( - String dbName, String tblName, long writeId) throws MetaException { - try { - Connection dbConn = null; - PreparedStatement pst = null; - try { - /** - * This runs at READ_COMMITTED for exactly the same reason as {@link #getOpenTxnsInfo()} - */ - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - - String query = "SELECT \"T2W_TXNID\" FROM \"TXN_TO_WRITE_ID\" WHERE" - + " \"T2W_DATABASE\" = ? AND \"T2W_TABLE\" = ? 
AND \"T2W_WRITEID\" = " + writeId; - pst = sqlGenerator.prepareStmtWithParameters(dbConn, query, Arrays.asList(dbName, tblName)); - if (LOG.isDebugEnabled()) { - LOG.debug("Going to execute query <" + query.replace("?", "{}") + ">", - quoteString(dbName), quoteString(tblName)); - } - ResultSet rs = pst.executeQuery(); - long txnId = -1; - if (rs.next()) { - txnId = rs.getLong(1); - } - return txnId; - } catch (SQLException e) { - checkRetryable(e, "getTxnIdForWriteId"); - throw new MetaException("Unable to select from transaction database, " - + StringUtils.stringifyException(e)); - } finally { - close(null, pst, dbConn); - } - } catch (RetryException e) { - return getTxnIdForWriteId(dbName, tblName, writeId); - } - } - - @Override - @RetrySemantics.Idempotent - public CompactionResponse compact(CompactionRequest rqst) throws MetaException { - // Put a compaction request in the queue. - try { - TxnStore.MutexAPI.LockHandle handle = null; - try { - lockInternal(); - /** - * MUTEX_KEY.CompactionScheduler lock ensures that there is only 1 entry in - * Initiated/Working state for any resource. This ensures that we don't run concurrent - * compactions for any resource. - */ - handle = getMutexAPI().acquireLock(MUTEX_KEY.CompactionScheduler.name()); - - try (Connection dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED)) { - try (Statement stmt = dbConn.createStatement()) { - - long id = generateCompactionQueueId(stmt); - - GetValidWriteIdsRequest request = new GetValidWriteIdsRequest( - Collections.singletonList(getFullTableName(rqst.getDbname(), rqst.getTablename()))); - final ValidCompactorWriteIdList tblValidWriteIds = - TxnUtils.createValidCompactWriteIdList(getValidWriteIds(request).getTblValidWriteIds().get(0)); - if (LOG.isDebugEnabled()) { - LOG.debug("ValidCompactWriteIdList: {}", tblValidWriteIds.writeToString()); - } - - StringBuilder sb = new StringBuilder("SELECT \"CQ_ID\", \"CQ_STATE\" FROM \"COMPACTION_QUEUE\" WHERE"). - append(" (\"CQ_STATE\" IN("). - append(quoteChar(INITIATED_STATE)).append(",").append(quoteChar(WORKING_STATE)). - append(") OR (\"CQ_STATE\" = ").append(quoteChar(READY_FOR_CLEANING)). - append(" AND \"CQ_HIGHEST_WRITE_ID\" = ?))"). - append(" AND \"CQ_DATABASE\"=?"). 
- append(" AND \"CQ_TABLE\"=?").append(" AND "); - if(rqst.getPartitionname() == null) { - sb.append("\"CQ_PARTITION\" is null"); - } else { - sb.append("\"CQ_PARTITION\"=?"); - } - - try (PreparedStatement pst = dbConn.prepareStatement(sqlGenerator.addEscapeCharacters(sb.toString()))) { - pst.setLong(1, tblValidWriteIds.getHighWatermark()); - pst.setString(2, rqst.getDbname()); - pst.setString(3, rqst.getTablename()); - if (rqst.getPartitionname() != null) { - pst.setString(4, rqst.getPartitionname()); - } - LOG.debug("Going to execute query <{}>", sb); - try (ResultSet rs = pst.executeQuery()) { - if(rs.next()) { - long enqueuedId = rs.getLong(1); - String state = CompactionState.fromSqlConst(rs.getString(2)).toString(); - LOG.info("Ignoring request to compact {}/{}/{} since it is already {} with id={}", rqst.getDbname(), - rqst.getTablename(), rqst.getPartitionname(), quoteString(state), enqueuedId); - CompactionResponse resp = new CompactionResponse(-1, REFUSED_RESPONSE, false); - resp.setErrormessage("Compaction is already scheduled with state=" + quoteString(state) + - " and id=" + enqueuedId); - return resp; - } - } - } - List params = new ArrayList<>(); - StringBuilder buf = new StringBuilder("INSERT INTO \"COMPACTION_QUEUE\" (\"CQ_ID\", \"CQ_DATABASE\", " + - "\"CQ_TABLE\", "); - String partName = rqst.getPartitionname(); - if (partName != null) buf.append("\"CQ_PARTITION\", "); - buf.append("\"CQ_STATE\", \"CQ_TYPE\", \"CQ_ENQUEUE_TIME\", \"CQ_POOL_NAME\""); - if (rqst.isSetNumberOfBuckets()) { - buf.append(", \"CQ_NUMBER_OF_BUCKETS\""); - } - if (rqst.isSetOrderByClause()) { - buf.append(", \"CQ_ORDER_BY\""); - } - if (rqst.getProperties() != null) { - buf.append(", \"CQ_TBLPROPERTIES\""); - } - if (rqst.getRunas() != null) { - buf.append(", \"CQ_RUN_AS\""); - } - if (rqst.getInitiatorId() != null) { - buf.append(", \"CQ_INITIATOR_ID\""); - } - if (rqst.getInitiatorVersion() != null) { - buf.append(", \"CQ_INITIATOR_VERSION\""); - } - buf.append(") values ("); - buf.append(id); - buf.append(", ?"); - buf.append(", ?"); - buf.append(", "); - params.add(rqst.getDbname()); - params.add(rqst.getTablename()); - if (partName != null) { - buf.append("?, '"); - params.add(partName); - } else { - buf.append("'"); - } - buf.append(INITIATED_STATE); - buf.append("', '"); - buf.append(TxnUtils.thriftCompactionType2DbType(rqst.getType())); - buf.append("',"); - buf.append(getEpochFn(dbProduct)); - buf.append(", ?"); - params.add(rqst.getPoolName()); - if (rqst.isSetNumberOfBuckets()) { - buf.append(", ").append(rqst.getNumberOfBuckets()); - } - if (rqst.isSetOrderByClause()) { - buf.append(", ?"); - params.add(rqst.getOrderByClause()); - } - if (rqst.getProperties() != null) { - buf.append(", ?"); - params.add(new StringableMap(rqst.getProperties()).toString()); - } - if (rqst.getRunas() != null) { - buf.append(", ?"); - params.add(rqst.getRunas()); - } - if (rqst.getInitiatorId() != null) { - buf.append(", ?"); - params.add(rqst.getInitiatorId()); - } - if (rqst.getInitiatorVersion() != null) { - buf.append(", ?"); - params.add(rqst.getInitiatorVersion()); - } - buf.append(")"); - String s = buf.toString(); - - try (PreparedStatement pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, params)) { - LOG.debug("Going to execute update <{}>", s); - pst.executeUpdate(); - } - LOG.debug("Going to commit"); - dbConn.commit(); - return new CompactionResponse(id, INITIATED_RESPONSE, true); - } catch (SQLException e) { - LOG.debug("Going to rollback: ", e); - dbConn.rollback(); - throw e; - } - } 
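generateCompactionQueueId(), used by compact() above, hands out ids optimistically: read the current value of NCQ_NEXT, then bump it with an UPDATE that is guarded by the value just read; if another caller won the race the UPDATE touches zero rows and the allocation is retried. The following is a rough sketch of that idea against a hypothetical NEXT_ID(NEXT_VAL) table, not the metastore schema.

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;

class NextIdSketch {
  static long allocateId(Connection conn) throws SQLException {
    while (true) {
      try (Statement stmt = conn.createStatement();
           ResultSet rs = stmt.executeQuery("SELECT \"NEXT_VAL\" FROM \"NEXT_ID\"")) {
        if (!rs.next()) {
          throw new IllegalStateException("NEXT_ID table not initialized");
        }
        long id = rs.getLong(1);
        int updated = stmt.executeUpdate(
            "UPDATE \"NEXT_ID\" SET \"NEXT_VAL\" = " + (id + 1) + " WHERE \"NEXT_VAL\" = " + id);
        if (updated == 1) {
          return id;          // this caller won the race for the id
        }
        // someone else took the id concurrently; loop and read the new value
      }
    }
  }
}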
- } catch (SQLException e) { - checkRetryable(e, "COMPACT(" + rqst + ")"); - throw new MetaException("Unable to put the compaction request into the queue: " + - StringUtils.stringifyException(e)); - } finally { - if (handle != null) { - handle.releaseLocks(); - } - unlockInternal(); - } - } catch (RetryException e) { - return compact(rqst); - } - } - - @Override - @RetrySemantics.SafeToRetry - public boolean submitForCleanup(CompactionRequest rqst, long highestWriteId, long txnId) throws MetaException { - // Put a compaction request in the queue. - try { - Connection dbConn = null; - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - lockInternal(); - - List params = new ArrayList() {{ - add(rqst.getDbname()); - add(rqst.getTablename()); - }}; - long cqId; - try (Statement stmt = dbConn.createStatement()) { - cqId = generateCompactionQueueId(stmt); - } - StringBuilder buf = new StringBuilder( - "INSERT INTO \"COMPACTION_QUEUE\" (\"CQ_ID\", \"CQ_HIGHEST_WRITE_ID\", \"CQ_TXN_ID\", \"CQ_ENQUEUE_TIME\", \"CQ_DATABASE\", \"CQ_TABLE\", "); - String partName = rqst.getPartitionname(); - if (partName != null) { - buf.append("\"CQ_PARTITION\", "); - params.add(partName); - } - buf.append("\"CQ_STATE\", \"CQ_TYPE\""); - params.add(String.valueOf(READY_FOR_CLEANING)); - params.add(TxnUtils.thriftCompactionType2DbType(rqst.getType()).toString()); - - if (rqst.getProperties() != null) { - buf.append(", \"CQ_TBLPROPERTIES\""); - params.add(new StringableMap(rqst.getProperties()).toString()); - } - if (rqst.getRunas() != null) { - buf.append(", \"CQ_RUN_AS\""); - params.add(rqst.getRunas()); - } - buf.append(") values (") - .append( - Stream.of(cqId, highestWriteId, txnId, getEpochFn(dbProduct)) - .map(Object::toString) - .collect(Collectors.joining(", "))) - .append(repeat(", ?", params.size())) - .append(")"); - - String s = buf.toString(); - try (PreparedStatement pst = sqlGenerator.prepareStmtWithParameters(dbConn, s, params)) { - LOG.debug("Going to execute update <{}>", s); - pst.executeUpdate(); - } - LOG.debug("Going to commit"); - dbConn.commit(); - return true; - } catch (SQLException e) { - LOG.debug("Going to rollback: ", e); - rollbackDBConn(dbConn); - checkRetryable(e, "submitForCleanup(" + rqst + ")"); - throw new MetaException("Failed to submit cleanup request: " + - StringUtils.stringifyException(e)); - } finally { - closeDbConn(dbConn); - unlockInternal(); - } - } catch (RetryException e) { - return submitForCleanup(rqst, highestWriteId, txnId); - } - } - - @RetrySemantics.ReadOnly - @SuppressWarnings("squid:S2095") - public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaException { - try { - ShowCompactResponse response = new ShowCompactResponse(new ArrayList<>()); - String query = TxnQueries.SHOW_COMPACTION_QUERY + - getShowCompactFilterClause(rqst) + - getShowCompactSortingOrderClause(rqst); - List params = getShowCompactParamList(rqst); - - try (Connection dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - PreparedStatement stmt = sqlGenerator.prepareStmtWithParameters(dbConn, query, params)) { - if (rqst.isSetId()) { - stmt.setLong(1, rqst.getId()); - } - int rowLimit = (int) rqst.getLimit(); - if (rowLimit > 0) { - stmt.setMaxRows(rowLimit); - } - LOG.debug("Going to execute query <" + query + ">"); - try (ResultSet rs = stmt.executeQuery()) { - while (rs.next()) { - ShowCompactResponseElement e = new ShowCompactResponseElement(); - e.setDbname(rs.getString(1)); - e.setTablename(rs.getString(2)); - 
e.setPartitionname(rs.getString(3)); - e.setState(CompactionState.fromSqlConst(rs.getString(4)).toString()); - try { - e.setType(TxnUtils.dbCompactionType2ThriftType(rs.getString(5).charAt(0))); - } catch (SQLException ex) { - //do nothing to handle RU/D if we add another status - } - e.setWorkerid(rs.getString(6)); - long start = rs.getLong(7); - if (!rs.wasNull()) { - e.setStart(start); - } - long endTime = rs.getLong(8); - if (endTime != -1) { - e.setEndTime(endTime); - } - e.setRunAs(rs.getString(9)); - e.setHadoopJobId(rs.getString(10)); - e.setId(rs.getLong(11)); - e.setErrorMessage(rs.getString(12)); - long enqueueTime = rs.getLong(13); - if (!rs.wasNull()) { - e.setEnqueueTime(enqueueTime); - } - e.setWorkerVersion(rs.getString(14)); - e.setInitiatorId(rs.getString(15)); - e.setInitiatorVersion(rs.getString(16)); - long cleanerStart = rs.getLong(17); - if (!rs.wasNull() && (cleanerStart != -1)) { - e.setCleanerStart(cleanerStart); - } - String poolName = rs.getString(18); - if (isBlank(poolName)) { - e.setPoolName(DEFAULT_POOL_NAME); - } else { - e.setPoolName(poolName); - } - e.setTxnId(rs.getLong(19)); - e.setNextTxnId(rs.getLong(20)); - e.setCommitTime(rs.getLong(21)); - e.setHightestTxnId(rs.getLong(22)); - response.addToCompacts(e); - } - } - } catch (SQLException e) { - checkRetryable(e, "showCompact(" + rqst + ")"); - throw new MetaException("Unable to select from transaction database " + - StringUtils.stringifyException(e)); - } - return response; - } catch (RetryException e) { - return showCompact(rqst); - } - } - - private String getShowCompactSortingOrderClause(ShowCompactRequest request) { - String sortingOrder = request.getOrder(); - return isNotBlank(sortingOrder) ? " ORDER BY " + sortingOrder : TxnQueries.SHOW_COMPACTION_ORDERBY_CLAUSE; - } - - private List getShowCompactParamList(ShowCompactRequest request) throws MetaException { - if (request.getId() > 0) { - return Collections.emptyList(); - } - String poolName = request.getPoolName(); - String dbName = request.getDbName(); - String tableName = request.getTbName(); - String partName = request.getPartName(); - CompactionType type = request.getType(); - String state = request.getState(); - - List params = new ArrayList<>(); - if (isNotBlank(dbName)) { - params.add(dbName); - } - if (isNotBlank(tableName)) { - params.add(tableName); - } - if (isNotBlank(partName)) { - params.add(partName); - } - if (isNotBlank(state)) { - params.add(state); - } - if (type != null) { - params.add(TxnUtils.thriftCompactionType2DbType(type).toString()); - } - if (isNotBlank(poolName)) { - params.add(poolName); - } - return params; - } - - private String getShowCompactFilterClause(ShowCompactRequest request) { - List params = new ArrayList<>(); - - if (request.getId() > 0) { - params.add("\"CC_ID\"=?"); - } else { - if (isNotBlank(request.getDbName())) { - params.add("\"CC_DATABASE\"=?"); - } - if (isNotBlank(request.getTbName())) { - params.add("\"CC_TABLE\"=?"); - } - if (isNotBlank(request.getPartName())) { - params.add("\"CC_PARTITION\"=?"); - } - if (isNotBlank(request.getState())) { - params.add("\"CC_STATE\"=?"); - } - if (request.getType() != null) { - params.add("\"CC_TYPE\"=?"); - } - if (isNotBlank(request.getPoolName())) { - params.add("\"CC_POOL_NAME\"=?"); - } - } - return !params.isEmpty() ? - " WHERE " + StringUtils.join(" AND ", params) : EMPTY; - } - - /** - * We assume this is only called by metadata cache server to know if there are new base/delta files should be read. 
- * The query filters compactions by state and only returns SUCCEEDED or READY_FOR_CLEANING compactions because - * only these two states means there are new files ready to be read. - */ - @RetrySemantics.ReadOnly - public GetLatestCommittedCompactionInfoResponse getLatestCommittedCompactionInfo( - GetLatestCommittedCompactionInfoRequest rqst) throws MetaException { - GetLatestCommittedCompactionInfoResponse response = new GetLatestCommittedCompactionInfoResponse(new ArrayList<>()); - Connection dbConn = null; - PreparedStatement pst = null; - ResultSet rs = null; - try { - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - - List params = new ArrayList<>(); - // This query combines the result sets of SUCCEEDED compactions and READY_FOR_CLEANING compactions - // We also sort the result by CC_ID in descending order so that we can keep only the latest record - // according to the order in result set - StringBuilder sb = new StringBuilder() - .append("SELECT * FROM (") - .append(" SELECT") - .append(" \"CC_ID\", \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_TYPE\"") - .append(" FROM \"COMPLETED_COMPACTIONS\"") - .append(" WHERE \"CC_STATE\" = " + quoteChar(SUCCEEDED_STATE)) - .append(" UNION ALL") - .append(" SELECT") - .append(" \"CQ_ID\" AS \"CC_ID\", \"CQ_DATABASE\" AS \"CC_DATABASE\"") - .append(" ,\"CQ_TABLE\" AS \"CC_TABLE\", \"CQ_PARTITION\" AS \"CC_PARTITION\"") - .append(" ,\"CQ_TYPE\" AS \"CC_TYPE\"") - .append(" FROM \"COMPACTION_QUEUE\"") - .append(" WHERE \"CQ_STATE\" = " + quoteChar(READY_FOR_CLEANING)) - .append(") AS compactions ") - .append(" WHERE \"CC_DATABASE\" = ? AND \"CC_TABLE\" = ?"); - params.add(rqst.getDbname()); - params.add(rqst.getTablename()); - if (rqst.getPartitionnamesSize() > 0) { - sb.append(" AND \"CC_PARTITION\" IN ("); - sb.append(String.join(",", - Collections.nCopies(rqst.getPartitionnamesSize(), "?"))); - sb.append(")"); - params.addAll(rqst.getPartitionnames()); - } - if (rqst.isSetLastCompactionId()) { - sb.append(" AND \"CC_ID\" > ?"); - } - sb.append(" ORDER BY \"CC_ID\" DESC"); - - pst = sqlGenerator.prepareStmtWithParameters(dbConn, sb.toString(), params); - if (rqst.isSetLastCompactionId()) { - pst.setLong(params.size() + 1, rqst.getLastCompactionId()); - } - LOG.debug("Going to execute query <{}>", sb); - rs = pst.executeQuery(); - Set partitionSet = new HashSet<>(); - while (rs.next()) { - CompactionInfoStruct lci = new CompactionInfoStruct(); - lci.setId(rs.getLong(1)); - lci.setDbname(rs.getString(2)); - lci.setTablename(rs.getString(3)); - String partition = rs.getString(4); - if (!rs.wasNull()) { - lci.setPartitionname(partition); - } - lci.setType(TxnUtils.dbCompactionType2ThriftType(rs.getString(5).charAt(0))); - // Only put the latest record of each partition into response - if (!partitionSet.contains(partition)) { - response.addToCompactions(lci); - partitionSet.add(partition); - } - } - } catch (SQLException e) { - LOG.error("Unable to execute query", e); - checkRetryable(e, "getLatestCommittedCompactionInfo"); - } finally { - close(rs, pst, dbConn); - } - return response; - } catch (RetryException e) { - return getLatestCommittedCompactionInfo(rqst); - } - } - - public MetricsInfo getMetricsInfo() throws MetaException { - Connection dbConn = null; - try { - MetricsInfo metrics = new MetricsInfo(); - String s = MessageFormat.format(SELECT_METRICS_INFO_QUERY, getEpochFn(dbProduct)); - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - try (Statement stmt = dbConn.createStatement()){ - 
ResultSet rs = stmt.executeQuery(s); - if (rs.next()) { - metrics.setTxnToWriteIdCount(rs.getInt(1)); - metrics.setCompletedTxnsCount(rs.getInt(2)); - metrics.setOpenReplTxnsCount(rs.getInt(3)); - metrics.setOldestOpenReplTxnId(rs.getInt(4)); - metrics.setOldestOpenReplTxnAge(rs.getInt(5)); - metrics.setOpenNonReplTxnsCount(rs.getInt(6)); - metrics.setOldestOpenNonReplTxnId(rs.getInt(7)); - metrics.setOldestOpenNonReplTxnAge(rs.getInt(8)); - metrics.setAbortedTxnsCount(rs.getInt(9)); - metrics.setOldestAbortedTxnId(rs.getInt(10)); - metrics.setOldestAbortedTxnAge(rs.getInt(11)); - metrics.setLocksCount(rs.getInt(12)); - metrics.setOldestLockAge(rs.getInt(13)); - metrics.setOldestReadyForCleaningAge(rs.getInt(14)); - } - } - try (PreparedStatement pstmt = dbConn.prepareStatement(SELECT_TABLES_WITH_X_ABORTED_TXNS)) { - Set resourceNames = new TreeSet<>(); - pstmt.setInt(1, MetastoreConf.getIntVar(conf, ConfVars.METASTORE_ACIDMETRICS_TABLES_WITH_ABORTED_TXNS_THRESHOLD)); - ResultSet rs = pstmt.executeQuery(); - while (rs.next()) { - String resourceName = rs.getString(1) + "." + rs.getString(2); - String partName = rs.getString(3); - resourceName = partName != null ? resourceName + "#" + partName : resourceName; - resourceNames.add(resourceName); - } - metrics.setTablesWithXAbortedTxnsCount(resourceNames.size()); - metrics.setTablesWithXAbortedTxns(resourceNames); - } - return metrics; - } catch (SQLException e) { - LOG.error("Unable to getMetricsInfo", e); - checkRetryable(e, "getMetricsInfo"); - throw new MetaException("Unable to execute getMetricsInfo() " + StringUtils.stringifyException(e)); - } finally { - closeDbConn(dbConn); - } - } catch (RetryException e) { - return getMetricsInfo(); - } - } - - - private static void shouldNeverHappen(long txnid) { - throw new RuntimeException("This should never happen: " + JavaUtils.txnIdToString(txnid)); - } - private static void shouldNeverHappen(long txnid, long extLockId, long intLockId) { - throw new RuntimeException("This should never happen: " + JavaUtils.txnIdToString(txnid) + " " - + JavaUtils.lockIdToString(extLockId) + " " + intLockId); - } - - /** - * Retry-by-caller note: - * This may be retried after dbConn.commit. At worst, it will create duplicate entries in - * TXN_COMPONENTS which won't affect anything. 
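getLatestCommittedCompactionInfo() above relies on ordering by CC_ID descending and keeping only the first row seen for each partition, which yields the latest committed compaction per partition in a single pass. A small sketch of that one-pass deduplication, with an illustrative row type rather than the Thrift structs:

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class LatestPerPartitionSketch {
  record CompactionRow(long id, String partition) { }

  /** Expects rows already sorted by id in descending order, as the SQL ORDER BY guarantees. */
  static List<CompactionRow> latestPerPartition(List<CompactionRow> rowsSortedDesc) {
    Set<String> seen = new HashSet<>();
    List<CompactionRow> latest = new ArrayList<>();
    for (CompactionRow row : rowsSortedDesc) {
      if (seen.add(row.partition())) {   // add() returns false once a newer row was kept
        latest.add(row);
      }
    }
    return latest;
  }
}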
See more comments in {@link #commitTxn(CommitTxnRequest)} - */ - @Override - @RetrySemantics.SafeToRetry - public void addDynamicPartitions(AddDynamicPartitions rqst) - throws NoSuchTxnException, TxnAbortedException, MetaException { - Connection dbConn = null; - Statement stmt = null; - try { - try { - lockInternal(); - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - stmt = dbConn.createStatement(); - TxnType txnType = getOpenTxnTypeAndLock(stmt, rqst.getTxnid()); - if (txnType == null) { - //ensures txn is still there and in expected state - ensureValidTxn(dbConn, rqst.getTxnid(), stmt); - shouldNeverHappen(rqst.getTxnid()); - } - //for RU this may be null so we should default it to 'u' which is most restrictive - OperationType ot = OperationType.UPDATE; - if(rqst.isSetOperationType()) { - ot = OperationType.fromDataOperationType(rqst.getOperationType()); - } - - Long writeId = rqst.getWriteid(); - try (PreparedStatement pstmt = dbConn.prepareStatement(TXN_COMPONENTS_INSERT_QUERY)) { - int insertCounter = 0; - for (String partName : rqst.getPartitionnames()) { - pstmt.setLong(1, rqst.getTxnid()); - pstmt.setString(2, normalizeCase(rqst.getDbname())); - pstmt.setString(3, normalizeCase(rqst.getTablename())); - pstmt.setString(4, partName); - pstmt.setString(5, ot.getSqlConst()); - pstmt.setObject(6, writeId); - - pstmt.addBatch(); - insertCounter++; - if (insertCounter % maxBatchSize == 0) { - LOG.debug("Executing a batch of <{}> queries. Batch size: {}", TXN_COMPONENTS_INSERT_QUERY, maxBatchSize); - pstmt.executeBatch(); - } - } - if (insertCounter % maxBatchSize != 0) { - LOG.debug("Executing a batch of <{}> queries. Batch size: {}", TXN_COMPONENTS_INSERT_QUERY, - insertCounter % maxBatchSize); - pstmt.executeBatch(); - } - } - try (PreparedStatement pstmt = dbConn.prepareStatement(TXN_COMPONENTS_DP_DELETE_QUERY)) { - pstmt.setLong(1, rqst.getTxnid()); - pstmt.setString(2, normalizeCase(rqst.getDbname())); - pstmt.setString(3, normalizeCase(rqst.getTablename())); - pstmt.execute(); - } - LOG.debug("Going to commit"); - dbConn.commit(); - } catch (SQLException e) { - LOG.debug("Going to rollback: ", e); - rollbackDBConn(dbConn); - checkRetryable(e, "addDynamicPartitions(" + rqst + ")"); - throw new MetaException("Unable to insert into from transaction database " + - StringUtils.stringifyException(e)); - } finally { - close(null, stmt, dbConn); - unlockInternal(); - } - } catch (RetryException e) { - addDynamicPartitions(rqst); - } - } - - /** - * Clean up corresponding records in metastore tables when corresponding object is dropped, - * specifically: TXN_COMPONENTS, COMPLETED_TXN_COMPONENTS, COMPACTION_QUEUE, COMPLETED_COMPACTIONS - * Retry-by-caller note: this is only idempotent assuming it's only called by dropTable/Db/etc - * operations. 
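addDynamicPartitions() above writes its TXN_COMPONENTS rows through JDBC batching: rows are added to a PreparedStatement batch and flushed every maxBatchSize additions, with one final flush for the remainder. A minimal sketch of that pattern with illustrative table and column names:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.List;

class BatchInsertSketch {
  static void insertComponents(Connection conn, long txnId, List<String> partNames, int maxBatchSize)
      throws SQLException {
    String sql = "INSERT INTO \"TXN_COMPONENTS\" (\"TC_TXNID\", \"TC_PARTITION\") VALUES (?, ?)";
    try (PreparedStatement pstmt = conn.prepareStatement(sql)) {
      int counter = 0;
      for (String partName : partNames) {
        pstmt.setLong(1, txnId);
        pstmt.setString(2, partName);
        pstmt.addBatch();
        counter++;
        if (counter % maxBatchSize == 0) {
          pstmt.executeBatch();       // flush a full batch
        }
      }
      if (counter % maxBatchSize != 0) {
        pstmt.executeBatch();         // flush the remaining partial batch
      }
    }
  }
}

Flushing in fixed-size batches keeps memory bounded and avoids a round trip per row, which matters when a dynamic-partition insert produces thousands of partitions.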
- * - * HIVE_LOCKS and WS_SET are cleaned up by {@link AcidHouseKeeperService}, if turned on - */ - @Override - @RetrySemantics.Idempotent - public void cleanupRecords(HiveObjectType type, Database db, Table table, - Iterator partitionIterator, boolean keepTxnToWriteIdMetaData) throws MetaException { - cleanupRecords(type, db, table, partitionIterator, keepTxnToWriteIdMetaData, 0); - } - - @Override - @RetrySemantics.Idempotent - public void cleanupRecords(HiveObjectType type, Database db, Table table, - Iterator partitionIterator, long txnId) throws MetaException { - cleanupRecords(type, db , table, partitionIterator, false, txnId); - } - - private void cleanupRecords(HiveObjectType type, Database db, Table table, - Iterator partitionIterator, boolean keepTxnToWriteIdMetaData, long txnId) throws MetaException { - - // cleanup should be done only for objects belonging to default catalog - final String defaultCatalog = getDefaultCatalog(conf); - - try { - Connection dbConn = null; - Statement stmt = null; - - try { - String dbName; - String tblName; - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - stmt = dbConn.createStatement(); - List queries = new ArrayList<>(); - StringBuilder buff = new StringBuilder(); - - switch (type) { - case DATABASE: { - dbName = db.getName(); - if(!defaultCatalog.equals(db.getCatalogName())) { - LOG.debug("Skipping cleanup because db: " + dbName + " belongs to catalog " - + "other than default catalog: " + db.getCatalogName()); - return; - } - - buff.append("DELETE FROM \"TXN_COMPONENTS\" WHERE \"TC_DATABASE\"='"); - buff.append(dbName); - buff.append("'"); - queries.add(buff.toString()); - - buff.setLength(0); - buff.append("DELETE FROM \"COMPLETED_TXN_COMPONENTS\" WHERE \"CTC_DATABASE\"='"); - buff.append(dbName); - buff.append("'"); - queries.add(buff.toString()); - - buff.setLength(0); - buff.append("DELETE FROM \"COMPACTION_QUEUE\" WHERE \"CQ_DATABASE\"='"); - buff.append(dbName); - buff.append("' AND \"CQ_TXN_ID\"!=").append(txnId); - queries.add(buff.toString()); - - buff.setLength(0); - buff.append("DELETE FROM \"COMPLETED_COMPACTIONS\" WHERE \"CC_DATABASE\"='"); - buff.append(dbName); - buff.append("'"); - queries.add(buff.toString()); - - buff.setLength(0); - buff.append("DELETE FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_DATABASE\"='"); - buff.append(dbName.toLowerCase()); - buff.append("'"); - queries.add(buff.toString()); - - buff.setLength(0); - buff.append("DELETE FROM \"NEXT_WRITE_ID\" WHERE \"NWI_DATABASE\"='"); - buff.append(dbName.toLowerCase()); - buff.append("'"); - queries.add(buff.toString()); - - buff.setLength(0); - buff.append("DELETE FROM \"COMPACTION_METRICS_CACHE\" WHERE \"CMC_DATABASE\"='"); - buff.append(dbName); - buff.append("'"); - queries.add(buff.toString()); - - break; - } - case TABLE: { - dbName = table.getDbName(); - tblName = table.getTableName(); - if(!defaultCatalog.equals(table.getCatName())) { - LOG.debug("Skipping cleanup because table: {} belongs to catalog other than default catalog: {}", tblName, - table.getCatName()); - return; - } - - buff.append("DELETE FROM \"TXN_COMPONENTS\" WHERE \"TC_DATABASE\"='"); - buff.append(dbName); - buff.append("' AND \"TC_TABLE\"='"); - buff.append(tblName); - buff.append("'"); - queries.add(buff.toString()); - - buff.setLength(0); - buff.append("DELETE FROM \"COMPLETED_TXN_COMPONENTS\" WHERE \"CTC_DATABASE\"='"); - buff.append(dbName); - buff.append("' AND \"CTC_TABLE\"='"); - buff.append(tblName); - buff.append("'"); - queries.add(buff.toString()); - - buff.setLength(0); - 
buff.append("DELETE FROM \"COMPACTION_QUEUE\" WHERE \"CQ_DATABASE\"='"); - buff.append(dbName); - buff.append("' AND \"CQ_TABLE\"='"); - buff.append(tblName); - buff.append("'"); - queries.add(buff.toString()); - - buff.setLength(0); - buff.append("DELETE FROM \"COMPLETED_COMPACTIONS\" WHERE \"CC_DATABASE\"='"); - buff.append(dbName); - buff.append("' AND \"CC_TABLE\"='"); - buff.append(tblName); - buff.append("'"); - queries.add(buff.toString()); - - if (!keepTxnToWriteIdMetaData) { - buff.setLength(0); - buff.append("DELETE FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_DATABASE\"='"); - buff.append(dbName.toLowerCase()); - buff.append("' AND \"T2W_TABLE\"='"); - buff.append(tblName.toLowerCase()); - buff.append("'"); - queries.add(buff.toString()); - - buff.setLength(0); - buff.append("DELETE FROM \"NEXT_WRITE_ID\" WHERE \"NWI_DATABASE\"='"); - buff.append(dbName.toLowerCase()); - buff.append("' AND \"NWI_TABLE\"='"); - buff.append(tblName.toLowerCase()); - buff.append("'"); - queries.add(buff.toString()); - } - buff.setLength(0); - buff.append("DELETE FROM \"COMPACTION_METRICS_CACHE\" WHERE \"CMC_DATABASE\"='"); - buff.append(dbName); - buff.append("' AND \"CMC_TABLE\"='"); - buff.append(tblName); - buff.append("'"); - queries.add(buff.toString()); - - break; - } - case PARTITION: { - dbName = table.getDbName(); - tblName = table.getTableName(); - if(!defaultCatalog.equals(table.getCatName())) { - LOG.debug("Skipping cleanup because partitions belong to catalog other than default catalog: {}", - table.getCatName()); - return; - } - - List partCols = table.getPartitionKeys(); // partition columns - List partVals; // partition values - String partName; - - while (partitionIterator.hasNext()) { - Partition p = partitionIterator.next(); - partVals = p.getValues(); - partName = Warehouse.makePartName(partCols, partVals); - - buff.setLength(0); - buff.append("DELETE FROM \"TXN_COMPONENTS\" WHERE \"TC_DATABASE\"='"); - buff.append(dbName); - buff.append("' AND \"TC_TABLE\"='"); - buff.append(tblName); - buff.append("' AND \"TC_PARTITION\"='"); - buff.append(partName); - buff.append("'"); - queries.add(buff.toString()); - - buff.setLength(0); - buff.append("DELETE FROM \"COMPLETED_TXN_COMPONENTS\" WHERE \"CTC_DATABASE\"='"); - buff.append(dbName); - buff.append("' AND \"CTC_TABLE\"='"); - buff.append(tblName); - buff.append("' AND \"CTC_PARTITION\"='"); - buff.append(partName); - buff.append("'"); - queries.add(buff.toString()); - - buff.setLength(0); - buff.append("DELETE FROM \"COMPACTION_QUEUE\" WHERE \"CQ_DATABASE\"='"); - buff.append(dbName); - buff.append("' AND \"CQ_TABLE\"='"); - buff.append(tblName); - buff.append("' AND \"CQ_PARTITION\"='"); - buff.append(partName); - buff.append("'"); - queries.add(buff.toString()); - - buff.setLength(0); - buff.append("DELETE FROM \"COMPLETED_COMPACTIONS\" WHERE \"CC_DATABASE\"='"); - buff.append(dbName); - buff.append("' AND \"CC_TABLE\"='"); - buff.append(tblName); - buff.append("' AND \"CC_PARTITION\"='"); - buff.append(partName); - buff.append("'"); - queries.add(buff.toString()); - - buff.setLength(0); - buff.append("DELETE FROM \"COMPACTION_METRICS_CACHE\" WHERE \"CMC_DATABASE\"='"); - buff.append(dbName); - buff.append("' AND \"CMC_TABLE\"='"); - buff.append(tblName); - buff.append("' AND \"CMC_PARTITION\"='"); - buff.append(partName); - buff.append("'"); - queries.add(buff.toString()); - } - - break; - } - default: { - throw new MetaException("Invalid object type for cleanup: " + type); - } - } - - for (String query : queries) { - LOG.debug("Going to 
execute update <{}>", query); - stmt.executeUpdate(query); - } - - LOG.debug("Going to commit"); - dbConn.commit(); - } catch (SQLException e) { - LOG.debug("Going to rollback: ", e); - rollbackDBConn(dbConn); - checkRetryable(e, "cleanupRecords"); - if (e.getMessage().contains("does not exist")) { - LOG.warn("Cannot perform cleanup since metastore table does not exist"); - } else { - throw new MetaException("Unable to clean up " + StringUtils.stringifyException(e)); - } - } finally { - closeStmt(stmt); - closeDbConn(dbConn); - } - } catch (RetryException e) { - cleanupRecords(type, db, table, partitionIterator, keepTxnToWriteIdMetaData); - } - } - /** - * Catalog hasn't been added to transactional tables yet, so it's passed in but not used. - */ - @Override - public void onRename(String oldCatName, String oldDbName, String oldTabName, String oldPartName, - String newCatName, String newDbName, String newTabName, String newPartName) - throws MetaException { - String callSig = "onRename(" + - oldCatName + "," + oldDbName + "," + oldTabName + "," + oldPartName + "," + - newCatName + "," + newDbName + "," + newTabName + "," + newPartName + ")"; - - if(newPartName != null) { - assert oldPartName != null && oldTabName != null && oldDbName != null && oldCatName != null : - callSig; - } - if(newTabName != null) { - assert oldTabName != null && oldDbName != null && oldCatName != null : callSig; - } - if(newDbName != null) { - assert oldDbName != null && oldCatName != null : callSig; - } - - try { - Connection dbConn = null; - Statement stmt = null; - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - stmt = dbConn.createStatement(); - List queries = new ArrayList<>(); - - String update = "UPDATE \"TXN_COMPONENTS\" SET "; - String where = " WHERE "; - if(oldPartName != null) { - update += "\"TC_PARTITION\" = " + quoteString(newPartName) + ", "; - where += "\"TC_PARTITION\" = " + quoteString(oldPartName) + " AND "; - } - if(oldTabName != null) { - update += "\"TC_TABLE\" = " + quoteString(normalizeCase(newTabName)) + ", "; - where += "\"TC_TABLE\" = " + quoteString(normalizeCase(oldTabName)) + " AND "; - } - if(oldDbName != null) { - update += "\"TC_DATABASE\" = " + quoteString(normalizeCase(newDbName)); - where += "\"TC_DATABASE\" = " + quoteString(normalizeCase(oldDbName)); - } - queries.add(update + where); - - update = "UPDATE \"COMPLETED_TXN_COMPONENTS\" SET "; - where = " WHERE "; - if(oldPartName != null) { - update += "\"CTC_PARTITION\" = " + quoteString(newPartName) + ", "; - where += "\"CTC_PARTITION\" = " + quoteString(oldPartName) + " AND "; - } - if(oldTabName != null) { - update += "\"CTC_TABLE\" = " + quoteString(normalizeCase(newTabName)) + ", "; - where += "\"CTC_TABLE\" = " + quoteString(normalizeCase(oldTabName)) + " AND "; - } - if(oldDbName != null) { - update += "\"CTC_DATABASE\" = " + quoteString(normalizeCase(newDbName)); - where += "\"CTC_DATABASE\" = " + quoteString(normalizeCase(oldDbName)); - } - queries.add(update + where); - - update = "UPDATE \"HIVE_LOCKS\" SET "; - where = " WHERE "; - if(oldPartName != null) { - update += "\"HL_PARTITION\" = " + quoteString(newPartName) + ", "; - where += "\"HL_PARTITION\" = " + quoteString(oldPartName) + " AND "; - } - if(oldTabName != null) { - update += "\"HL_TABLE\" = " + quoteString(normalizeCase(newTabName)) + ", "; - where += "\"HL_TABLE\" = " + quoteString(normalizeCase(oldTabName)) + " AND "; - } - if(oldDbName != null) { - update += "\"HL_DB\" = " + quoteString(normalizeCase(newDbName)); - where += 
"\"HL_DB\" = " + quoteString(normalizeCase(oldDbName)); - } - queries.add(update + where); - - update = "UPDATE \"COMPACTION_QUEUE\" SET "; - where = " WHERE "; - if(oldPartName != null) { - update += "\"CQ_PARTITION\" = " + quoteString(newPartName) + ", "; - where += "\"CQ_PARTITION\" = " + quoteString(oldPartName) + " AND "; - } - if(oldTabName != null) { - update += "\"CQ_TABLE\" = " + quoteString(normalizeCase(newTabName)) + ", "; - where += "\"CQ_TABLE\" = " + quoteString(normalizeCase(oldTabName)) + " AND "; - } - if(oldDbName != null) { - update += "\"CQ_DATABASE\" = " + quoteString(normalizeCase(newDbName)); - where += "\"CQ_DATABASE\" = " + quoteString(normalizeCase(oldDbName)); - } - queries.add(update + where); - - update = "UPDATE \"COMPLETED_COMPACTIONS\" SET "; - where = " WHERE "; - if(oldPartName != null) { - update += "\"CC_PARTITION\" = " + quoteString(newPartName) + ", "; - where += "\"CC_PARTITION\" = " + quoteString(oldPartName) + " AND "; - } - if(oldTabName != null) { - update += "\"CC_TABLE\" = " + quoteString(normalizeCase(newTabName)) + ", "; - where += "\"CC_TABLE\" = " + quoteString(normalizeCase(oldTabName)) + " AND "; - } - if(oldDbName != null) { - update += "\"CC_DATABASE\" = " + quoteString(normalizeCase(newDbName)); - where += "\"CC_DATABASE\" = " + quoteString(normalizeCase(oldDbName)); - } - queries.add(update + where); - - update = "UPDATE \"WRITE_SET\" SET "; - where = " WHERE "; - if(oldPartName != null) { - update += "\"WS_PARTITION\" = " + quoteString(newPartName) + ", "; - where += "\"WS_PARTITION\" = " + quoteString(oldPartName) + " AND "; - } - if(oldTabName != null) { - update += "\"WS_TABLE\" = " + quoteString(normalizeCase(newTabName)) + ", "; - where += "\"WS_TABLE\" = " + quoteString(normalizeCase(oldTabName)) + " AND "; - } - if(oldDbName != null) { - update += "\"WS_DATABASE\" = " + quoteString(normalizeCase(newDbName)); - where += "\"WS_DATABASE\" = " + quoteString(normalizeCase(oldDbName)); - } - queries.add(update + where); - - update = "UPDATE \"TXN_TO_WRITE_ID\" SET "; - where = " WHERE "; - if(oldTabName != null) { - update += "\"T2W_TABLE\" = " + quoteString(normalizeCase(newTabName)) + ", "; - where += "\"T2W_TABLE\" = " + quoteString(normalizeCase(oldTabName)) + " AND "; - } - if(oldDbName != null) { - update += "\"T2W_DATABASE\" = " + quoteString(normalizeCase(newDbName)); - where += "\"T2W_DATABASE\" = " + quoteString(normalizeCase(oldDbName)); - } - queries.add(update + where); - - update = "UPDATE \"NEXT_WRITE_ID\" SET "; - where = " WHERE "; - if(oldTabName != null) { - update += "\"NWI_TABLE\" = " + quoteString(normalizeCase(newTabName)) + ", "; - where += "\"NWI_TABLE\" = " + quoteString(normalizeCase(oldTabName)) + " AND "; - } - if(oldDbName != null) { - update += "\"NWI_DATABASE\" = " + quoteString(normalizeCase(newDbName)); - where += "\"NWI_DATABASE\" = " + quoteString(normalizeCase(oldDbName)); - } - queries.add(update + where); - - update = "UPDATE \"COMPACTION_METRICS_CACHE\" SET"; - where = " WHERE "; - if (oldPartName != null) { - update += "\"CMC_PARTITION\" = " + quoteString(normalizeCase(newPartName)) + ", "; - where += "\"CMC_PARTITION\" = " + quoteString(normalizeCase(oldPartName)) + " AND "; - } - if (oldTabName != null) { - update += "\"CMC_TABLE\" = " + quoteString(normalizeCase(newTabName)) + ", "; - where += "\"CMC_TABLE\" = " + quoteString(normalizeCase(oldTabName)) + " AND "; - } - if (oldDbName != null) { - update += "\"CMC_DATABASE\" = " + quoteString(normalizeCase(newDbName)); - where += 
"\"CMC_DATABASE\" = " + quoteString(normalizeCase(oldDbName)); - } - queries.add(update + where); - - for (String query : queries) { - LOG.debug("Going to execute update <{}>", query); - stmt.executeUpdate(query); - } - - LOG.debug("Going to commit: {}", callSig); - dbConn.commit(); - } catch (SQLException e) { - LOG.debug("Going to rollback: {}", callSig); - rollbackDBConn(dbConn); - checkRetryable(e, callSig); - if (e.getMessage().contains("does not exist")) { - LOG.warn("Cannot perform {} since metastore table does not exist", callSig); - } else { - throw new MetaException("Unable to " + callSig + ":" + StringUtils.stringifyException(e)); - } - } finally { - closeStmt(stmt); - closeDbConn(dbConn); - } - } catch (RetryException e) { - onRename(oldCatName, oldDbName, oldTabName, oldPartName, - newCatName, newDbName, newTabName, newPartName); - } - } - /** - * For testing only, do not use. - */ - @VisibleForTesting - public int numLocksInLockTable() throws SQLException, MetaException { - Connection dbConn = null; - Statement stmt = null; - ResultSet rs = null; - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - stmt = dbConn.createStatement(); - String s = "SELECT COUNT(*) FROM \"HIVE_LOCKS\""; - LOG.debug("Going to execute query <{}>", s); - rs = stmt.executeQuery(s); - rs.next(); - int rc = rs.getInt(1); - // Necessary to clean up the transaction in the db. - dbConn.rollback(); - return rc; - } finally { - close(rs, stmt, dbConn); - } - } - - /** - * For testing only, do not use. - */ - public long setTimeout(long milliseconds) { - long previous_timeout = timeout; - timeout = milliseconds; - return previous_timeout; - } - - protected class RetryException extends Exception { - - } - - Connection getDbConn(int isolationLevel) throws SQLException { - return getDbConn(isolationLevel, connPool); - } - - protected Connection getDbConn(int isolationLevel, DataSource connPool) throws SQLException { - Connection dbConn = null; - try { - dbConn = connPool.getConnection(); - dbConn.setAutoCommit(false); - dbConn.setTransactionIsolation(isolationLevel); - return dbConn; - } catch (SQLException e) { - closeDbConn(dbConn); - throw e; - } - } - - static void rollbackDBConn(Connection dbConn) { - try { - if (dbConn != null && !dbConn.isClosed()) dbConn.rollback(); - } catch (SQLException e) { - LOG.warn("Failed to rollback db connection " + getMessage(e)); - } - } - protected static void closeDbConn(Connection dbConn) { - try { - if (dbConn != null && !dbConn.isClosed()) { - dbConn.close(); - } - } catch (SQLException e) { - LOG.warn("Failed to close db connection " + getMessage(e)); - } - } - - /** - * Close statement instance. - * @param stmt statement instance. - */ - protected static void closeStmt(Statement stmt) { - try { - if (stmt != null && !stmt.isClosed()) stmt.close(); - } catch (SQLException e) { - LOG.warn("Failed to close statement " + getMessage(e)); - } - } - - /** - * Close the ResultSet. - * @param rs may be {@code null} - */ - static void close(ResultSet rs) { - try { - if (rs != null && !rs.isClosed()) { - rs.close(); - } - } - catch(SQLException ex) { - LOG.warn("Failed to close statement " + getMessage(ex)); - } - } - - /** - * Close all 3 JDBC artifacts in order: {@code rs stmt dbConn} - */ - static void close(ResultSet rs, Statement stmt, Connection dbConn) { - close(rs); - closeStmt(stmt); - closeDbConn(dbConn); - } - - private boolean waitForRetry(String caller, String errMsg) { - if (retryNum++ < retryLimit) { - LOG.warn("Retryable error detected in {}. 
Will wait {} ms and retry up to {} times. Error: {}", caller, - retryInterval, (retryLimit - retryNum + 1), errMsg); - try { - Thread.sleep(retryInterval); - } catch (InterruptedException ex) { - // - } - return true; - } else { - LOG.error("Fatal error in {}. Retry limit ({}) reached. Last error: {}", caller, retryLimit, errMsg); - } - return false; - } - - /** - * See {@link #checkRetryable(SQLException, String, boolean)}. - */ - void checkRetryable(SQLException e, String caller) throws RetryException { - checkRetryable(e, caller, false); - } - - /** - * Determine if an exception was such that it makes sense to retry. Unfortunately there is no standard way to do - * this, so we have to inspect the error messages and catch the telltale signs for each - * different database. This method will throw {@code RetryException} - * if the error is retry-able. - * @param e exception that was thrown. - * @param caller name of the method calling this (and other info useful to log) - * @param retryOnDuplicateKey whether to retry on unique key constraint violation - * @throws org.apache.hadoop.hive.metastore.txn.TxnHandler.RetryException when the operation should be retried - */ - void checkRetryable(SQLException e, String caller, boolean retryOnDuplicateKey) - throws RetryException { - - // If you change this function, remove the @Ignore from TestTxnHandler.deadlockIsDetected() - // to test these changes. - // MySQL and MSSQL use 40001 as the state code for rollback. Postgres uses 40001 and 40P01. - // Oracle seems to return different SQLStates and messages each time, - // so I've tried to capture the different error messages (there appear to be fewer different - // error messages than SQL states). - // Derby and newer MySQL driver use the new SQLTransactionRollbackException - boolean sendRetrySignal = false; - try { - if(dbProduct == null) { - throw new IllegalStateException("DB Type not determined yet."); - } - if (dbProduct.isDeadlock(e)) { - if (deadlockCnt++ < ALLOWED_REPEATED_DEADLOCKS) { - long waitInterval = deadlockRetryInterval * deadlockCnt; - LOG.warn("Deadlock detected in {}. Will wait {} ms try again up to {} times.", caller, waitInterval, - (ALLOWED_REPEATED_DEADLOCKS - deadlockCnt + 1)); - // Pause for a just a bit for retrying to avoid immediately jumping back into the deadlock. 
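The deadlock handling above escalates the wait linearly: each repeated deadlock sleeps one interval longer before the operation is retried, up to ALLOWED_REPEATED_DEADLOCKS attempts. A compact sketch of that backoff bookkeeping (the constants here are illustrative, not the configured Hive values):

class DeadlockBackoffSketch {
  private static final int ALLOWED_REPEATED_DEADLOCKS = 10;
  private static final long DEADLOCK_RETRY_INTERVAL_MS = 1000;

  private int deadlockCnt = 0;

  /** @return true if the caller should retry the failed operation, false to give up. */
  boolean onDeadlock() {
    if (deadlockCnt++ < ALLOWED_REPEATED_DEADLOCKS) {
      long waitMs = DEADLOCK_RETRY_INTERVAL_MS * deadlockCnt;   // linear escalation
      try {
        Thread.sleep(waitMs);
      } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
      }
      return true;
    }
    deadlockCnt = 0;   // reset so the next operation starts with a clean count
    return false;
  }
}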
- try { - Thread.sleep(waitInterval); - } catch (InterruptedException ie) { - // NOP - } - sendRetrySignal = true; - } else { - LOG.error("Too many repeated deadlocks in {}, giving up.", caller); - } - } else if (isRetryable(conf, e)) { - //in MSSQL this means Communication Link Failure - sendRetrySignal = waitForRetry(caller, e.getMessage()); - } else if (retryOnDuplicateKey && isDuplicateKeyError(e)) { - sendRetrySignal = waitForRetry(caller, e.getMessage()); - } - else { - //make sure we know we saw an error that we don't recognize - LOG.info("Non-retryable error in {} : {}", caller, getMessage(e)); - } - } - finally { - /*if this method ends with anything except a retry signal, the caller should fail the operation - and propagate the error up to the its caller (Metastore client); thus must reset retry counters*/ - if(!sendRetrySignal) { - deadlockCnt = 0; - retryNum = 0; - } - } - if(sendRetrySignal) { - throw new RetryException(); - } - } + @Override + public long cleanupMaterializationRebuildLocks(ValidTxnList validTxnList, long timeout) throws MetaException { + return new ReleaseMaterializationRebuildLocks(validTxnList, timeout).execute(jdbcResource); + } /** - * Determine the current time, using the RDBMS as a source of truth - * @param conn database connection - * @return current time in milliseconds - * @throws org.apache.hadoop.hive.metastore.api.MetaException if the time cannot be determined + * As much as possible (i.e. in absence of retries) we want both operations to be done on the same + * connection (but separate transactions). + * Retry-by-caller note: If the call to lock is from a transaction, then in the worst case + * there will be a duplicate set of locks but both sets will belong to the same txn so they + * will not conflict with each other. For locks w/o txn context (i.e. read-only query), this + * may lead to deadlock (at least a long wait). (e.g. 1st call creates locks in {@code LOCK_WAITING} + * mode and response gets lost. Then {@link org.apache.hadoop.hive.metastore.RetryingMetaStoreClient} + * retries, and enqueues another set of locks in LOCK_WAITING. The 2nd LockResponse is delivered + * to the DbLockManager, which will keep dong {@link #checkLock(CheckLockRequest)} until the 1st + * set of locks times out. */ - protected long getDbTime(Connection conn) throws MetaException { - Statement stmt = null; + @Override + public LockResponse lock(LockRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException { + long lockId = txnLockManager.enqueueLock(rqst); try { - stmt = conn.createStatement(); - String s = dbProduct.getDBTime(); - - LOG.debug("Going to execute query <{}>", s); - ResultSet rs = stmt.executeQuery(s); - if (!rs.next()) throw new MetaException("No results from date query"); - return rs.getTimestamp(1).getTime(); - } catch (SQLException e) { - String msg = "Unable to determine current time: " + e.getMessage(); - LOG.error(msg); - throw new MetaException(msg); - } finally { - closeStmt(stmt); + return txnLockManager.checkLock(lockId, rqst.getTxnid(), rqst.isZeroWaitReadEnabled(), rqst.isExclusiveCTAS()); + } catch (NoSuchLockException e) { + // This should never happen, as we just added the lock id + throw new MetaException("Couldn't find a lock we just created! 
" + e.getMessage()); } } - + /** - * Determine the current time, using the RDBMS as a source of truth - * @return current time in milliseconds - * @throws org.apache.hadoop.hive.metastore.api.MetaException if the time cannot be determined + * Why doesn't this get a txnid as parameter? The caller should either know the txnid or know there isn't one. + * Either way getTxnIdFromLockId() will not be needed. This would be a Thrift change. + *
+ * Also, when lock acquisition returns WAITING, it's retried every 15 seconds (best case, see DbLockManager.backoff(), + * in practice more often) + * which means this is heartbeating way more often than hive.txn.timeout and creating extra load on DB. + *
+ * The clients that operate in blocking mode, can't heartbeat a lock until the lock is acquired. + * We should make CheckLockRequest include timestamp or last request to skip unnecessary heartbeats. Thrift change. + *
+ * {@link #checkLock(CheckLockRequest)} must run at SERIALIZABLE + * (make sure some lock we are checking against doesn't move from W to A in another txn) + * but this method can heartbeat in separate txn at READ_COMMITTED. + *
+ * Retry-by-caller note: + * Retryable because {@link #checkLock(CheckLockRequest)} is */ - protected Timestamp getDbTime() throws MetaException { - return jdbcResource.getJdbcTemplate().queryForObject( - dbProduct.getDBTime(), - new MapSqlParameterSource(), - (ResultSet rs, int rowNum) -> rs.getTimestamp(1)); - } - - - protected String isWithinCheckInterval(String expr, long interval) throws MetaException { - return dbProduct.isWithinCheckInterval(expr, interval); - } - - private void determineDatabaseProduct(Connection conn) { - try { - String s = conn.getMetaData().getDatabaseProductName(); - dbProduct = DatabaseProduct.determineDatabaseProduct(s, conf); - if (dbProduct.isUNDEFINED()) { - String msg = "Unrecognized database product name <" + s + ">"; - LOG.error(msg); - throw new IllegalStateException(msg); - } - } catch (SQLException e) { - String msg = "Unable to get database product name"; - LOG.error(msg, e); - throw new IllegalStateException(msg, e); + @Override + public LockResponse checkLock(CheckLockRequest rqst) + throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException { + long extLockId = rqst.getLockid(); + // Heartbeat on the lockid first, to assure that our lock is still valid. + // Then look up the lock info (hopefully in the cache). If these locks + // are associated with a transaction then heartbeat on that as well. + List lockInfos = jdbcResource.execute(new GetLocksByLockId(extLockId, 1, sqlGenerator)); + if (CollectionUtils.isEmpty(lockInfos)) { + throw new NoSuchLockException("No such lock " + JavaUtils.lockIdToString(extLockId)); + } + LockInfo lockInfo = lockInfos.get(0); + if (lockInfo.getTxnId() > 0) { + new HeartbeatTxnFunction(lockInfo.getTxnId()).execute(jdbcResource); + } else { + new HeartbeatLockFunction(rqst.getLockid()).execute(jdbcResource); } + return txnLockManager.checkLock(extLockId, lockInfo.getTxnId(), false, false); } - private static class LockInfo { - private final long extLockId; - private final long intLockId; - //0 means there is no transaction, i.e. it a select statement which is not part of - //explicit transaction or a IUD statement that is not writing to ACID table - private final long txnId; - private final String db; - private final String table; - private final String partition; - private final LockState state; - private final LockType type; - - // Assumes the result set is set to a valid row - LockInfo(ResultSet rs) throws SQLException, MetaException { - extLockId = rs.getLong("HL_LOCK_EXT_ID"); // can't be null - intLockId = rs.getLong("HL_LOCK_INT_ID"); // can't be null - db = rs.getString("HL_DB"); // can't be null - String t = rs.getString("HL_TABLE"); - table = (rs.wasNull() ? null : t); - String p = rs.getString("HL_PARTITION"); - partition = (rs.wasNull() ? 
null : p); - switch (rs.getString("HL_LOCK_STATE").charAt(0)) { - case LOCK_WAITING: state = LockState.WAITING; break; - case LOCK_ACQUIRED: state = LockState.ACQUIRED; break; - default: - throw new MetaException("Unknown lock state " + rs.getString("HL_LOCK_STATE").charAt(0)); - } - char lockChar = rs.getString("HL_LOCK_TYPE").charAt(0); - type = LockTypeUtil.getLockTypeFromEncoding(lockChar) - .orElseThrow(() -> new MetaException("Unknown lock type: " + lockChar)); - txnId = rs.getLong("HL_TXNID"); //returns 0 if value is NULL - } - - LockInfo(ShowLocksResponseElement e) { - extLockId = e.getLockid(); - intLockId = e.getLockIdInternal(); - txnId = e.getTxnid(); - db = e.getDbname(); - table = e.getTablename(); - partition = e.getPartname(); - state = e.getState(); - type = e.getType(); - } - - public boolean equals(Object other) { - if (!(other instanceof LockInfo)) return false; - LockInfo o = (LockInfo)other; - // Lock ids are unique across the system. - return extLockId == o.extLockId && intLockId == o.intLockId; - } - - @Override - public String toString() { - return JavaUtils.lockIdToString(extLockId) + " intLockId:" + - intLockId + " " + JavaUtils.txnIdToString(txnId) - + " db:" + db + " table:" + table + " partition:" + - partition + " state:" + (state == null ? "null" : state.toString()) - + " type:" + (type == null ? "null" : type.toString()); - } - private boolean isDbLock() { - return db != null && table == null && partition == null; - } - private boolean isTableLock() { - return db != null && table != null && partition == null; - } - private boolean isPartitionLock() { - return !(isDbLock() || isTableLock()); - } + /** + * This would have been made simpler if all locks were associated with a txn. Then only txn needs to + * be heartbeated, committed, etc. no need for client to track individual locks. + * When removing locks not associated with txn this potentially conflicts with + * heartbeat/performTimeout which are update/delete of HIVE_LOCKS thus will be locked as needed by db. + * since this only removes from HIVE_LOCKS at worst some lock acquire is delayed + */ + @Override + public void unlock(UnlockRequest rqst) throws TxnOpenException, MetaException { + txnLockManager.unlock(rqst); } - private static class LockInfoComparator implements Comparator, Serializable { - private LockTypeComparator lockTypeComparator = new LockTypeComparator(); - - public boolean equals(Object other) { - return this == other; - } - - public int compare(LockInfo info1, LockInfo info2) { - // We sort by state (acquired vs waiting) and then by LockType, then by id - if (info1.state == LockState.ACQUIRED && - info2.state != LockState .ACQUIRED) { - return -1; - } - if (info1.state != LockState.ACQUIRED && - info2.state == LockState .ACQUIRED) { - return 1; - } - - int sortByType = lockTypeComparator.compare(info1.type, info2.type); - if(sortByType != 0) { - return sortByType; - } - if (info1.extLockId < info2.extLockId) { - return -1; - } else if (info1.extLockId > info2.extLockId) { - return 1; - } else { - if (info1.intLockId < info2.intLockId) { - return -1; - } else if (info1.intLockId > info2.intLockId) { - return 1; - } else { - return 0; - } - } - } + @Override + public ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException { + return txnLockManager.showLocks(rqst); } - private enum LockAction {ACQUIRE, WAIT, KEEP_LOOKING} - - // A jump table to figure out whether to wait, acquire, - // or keep looking . 
Since - // java doesn't have function pointers (grumble grumble) we store a - // character that we'll use to determine which function to call. - // The table maps the lock type of the lock we are looking to acquire to - // the lock type of the lock we are checking to the lock state of the lock - // we are checking to the desired action. - private static Map>> jumpTable; - - private int abortTxns(Connection dbConn, List txnids, - boolean skipCount, boolean isReplReplayed, TxnErrorMsg txnErrorMsg) throws SQLException, MetaException { - return abortTxns(dbConn, txnids, false, skipCount, isReplReplayed, txnErrorMsg); - } /** - * TODO: expose this as an operation to client. Useful for streaming API to abort all remaining - * transactions in a batch on IOExceptions. - * Caller must rollback the transaction if not all transactions were aborted since this will not - * attempt to delete associated locks in this case. - * - * @param dbConn An active connection - * @param txnids list of transactions to abort - * @param checkHeartbeat value used by {@link #performTimeOuts()} to ensure this doesn't Abort txn which were - * heartbeated after #performTimeOuts() select and this operation. - * @param skipCount If true, the method always returns 0, otherwise returns the number of actually aborted txns - * @return 0 if skipCount is true, the number of aborted transactions otherwise - * @throws SQLException + * {@code ids} should only have txnid or lockid but not both, ideally. + * Currently DBTxnManager.heartbeat() enforces this. */ - private int abortTxns(Connection dbConn, List txnids, boolean checkHeartbeat, - boolean skipCount, boolean isReplReplayed, TxnErrorMsg txnErrorMsg) - throws SQLException, MetaException { - Statement stmt = null; - if (txnids.isEmpty()) { - return 0; - } - Collections.sort(txnids); - LOG.debug("Aborting {} transaction(s) {} due to {}", txnids.size(), txnids, txnErrorMsg); - removeTxnsFromMinHistoryLevel(dbConn, txnids); - removeWriteIdsFromMinHistory(dbConn, txnids); - try { - stmt = dbConn.createStatement(); - //This is an update statement, thus at any Isolation level will take Write locks so will block - //all other ops using S4U on TXNS row. - List queries = new ArrayList<>(); - StringBuilder prefix = new StringBuilder(); - StringBuilder suffix = new StringBuilder(); - - // add update txns queries to query list - prefix.append("UPDATE \"TXNS\" SET \"TXN_STATE\" = ").append(TxnStatus.ABORTED) - .append(" , \"TXN_META_INFO\" = ").append(txnErrorMsg.toSqlString()) - .append(" WHERE \"TXN_STATE\" = ").append(TxnStatus.OPEN).append(" AND "); - if (checkHeartbeat) { - suffix.append(" AND \"TXN_LAST_HEARTBEAT\" < ") - .append(getEpochFn(dbProduct)).append("-").append(timeout); - } - TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "\"TXN_ID\"", true, false); - int numUpdateQueries = queries.size(); - - // add delete hive locks queries to query list - prefix.setLength(0); - suffix.setLength(0); - prefix.append("DELETE FROM \"HIVE_LOCKS\" WHERE "); - TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "\"HL_TXNID\"", false, false); - - //If this abort is for REPL_CREATED TXN initiated outside the replication flow, then clean the corresponding entry - //from REPL_TXN_MAP and mark that database as replication incompatible. - if (!isReplReplayed) { - for (String database : getDbNamesForReplayedTxns(dbConn, txnids)) { - markDbAsReplIncompatible(dbConn, database); - } - // Delete mapping from REPL_TXN_MAP if it exists. 
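The removed abortTxns() above builds its UPDATE "TXNS" and DELETE FROM "HIVE_LOCKS" statements with IN-clauses over the whole id list (via TxnUtils.buildQueryWithINClause) rather than issuing one statement per transaction. A minimal, self-contained sketch of that batching idea follows; the chunk size, class and method names are illustrative, and the quoted state characters merely stand in for the TxnStatus constants used by the real code.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

// Illustrative only: shows the IN-clause batching pattern used when aborting many txns.
// 'a' and 'o' stand in for TxnStatus.ABORTED / TxnStatus.OPEN; the real code uses those constants.
public class AbortBatchSketch {

  static List<String> buildAbortStatements(List<Long> txnIds, int maxIdsPerInClause) {
    List<String> statements = new ArrayList<>();
    for (int from = 0; from < txnIds.size(); from += maxIdsPerInClause) {
      String inList = txnIds.subList(from, Math.min(from + maxIdsPerInClause, txnIds.size()))
          .stream().map(String::valueOf).collect(Collectors.joining(", ", "(", ")"));
      // Move OPEN txns to ABORTED ...
      statements.add("UPDATE \"TXNS\" SET \"TXN_STATE\" = 'a'"
          + " WHERE \"TXN_STATE\" = 'o' AND \"TXN_ID\" IN " + inList);
      // ... and drop the lock rows they still hold.
      statements.add("DELETE FROM \"HIVE_LOCKS\" WHERE \"HL_TXNID\" IN " + inList);
    }
    return statements;
  }

  public static void main(String[] args) {
    buildAbortStatements(List.of(101L, 102L, 103L), 2).forEach(System.out::println);
  }
}
```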
- prefix.setLength(0); - suffix.setLength(0); - prefix.append("DELETE FROM \"REPL_TXN_MAP\" WHERE "); - TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "\"RTM_TARGET_TXN_ID\"", false, false); - } - - // execute all queries in the list in one batch - int numAborted = 0; - if (skipCount) { - executeQueriesInBatchNoCount(dbProduct, stmt, queries, maxBatchSize); - } else { - List affectedRowsByQuery = executeQueriesInBatch(stmt, queries, maxBatchSize); - numAborted = getUpdateCount(numUpdateQueries, affectedRowsByQuery); - } - - if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.METASTORE_ACIDMETRICS_EXT_ON)) { - Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_ABORTED_TXNS).inc(txnids.size()); - } - LOG.warn("Aborted {} transaction(s) {} due to {}", txnids.size(), txnids, txnErrorMsg); - return numAborted; - } finally { - closeStmt(stmt); - } + @Override + public void heartbeat(HeartbeatRequest ids) + throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException { + new HeartbeatTxnFunction(ids.getTxnid()).execute(jdbcResource); + new HeartbeatLockFunction(ids.getLockid()).execute(jdbcResource); } - - private int getUpdateCount(int numUpdateQueries, List affectedRowsByQuery) { - return affectedRowsByQuery.stream() - .limit(numUpdateQueries) - .mapToInt(Integer::intValue) - .sum(); + + @Override + public HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst) throws MetaException { + return new HeartbeatTxnRangeFunction(rqst).execute(jdbcResource); } - private static boolean isValidTxn(long txnId) { - return txnId != 0; + @Override + public long getTxnIdForWriteId(String dbName, String tblName, long writeId) throws MetaException { + return jdbcResource.execute(new TxnIdForWriteIdHandler(writeId, dbName, tblName)); } - /** - * Lock acquisition is meant to be fair, so every lock can only block on some lock with smaller - * hl_lock_ext_id by only checking earlier locks. - * - * For any given SQL statement all locks required by it are grouped under single extLockId and are - * granted all at once or all locks wait. - * - * This is expected to run at READ_COMMITTED. - * - * If there is a concurrent commitTxn/rollbackTxn, those can only remove rows from HIVE_LOCKS. - * If they happen to be for the same txnid, there will be a WW conflict (in MS DB), if different txnid, - * checkLock() will in the worst case keep locks in Waiting state a little longer. - */ - @RetrySemantics.SafeToRetry("See @SafeToRetry") - private LockResponse checkLock(Connection dbConn, long extLockId, long txnId, boolean zeroWaitReadEnabled, - boolean isExclusiveCTAS) - throws NoSuchLockException, TxnAbortedException, MetaException, SQLException { - Statement stmt = null; - ResultSet rs = null; - LockResponse response = new LockResponse(); - /** - * todo: Longer term we should pass this from client somehow - this would be an optimization; once - * that is in place make sure to build and test "writeSet" below using OperationType not LockType - * With Static Partitions we assume that the query modifies exactly the partitions it locked. (not entirely - * realistic since Update/Delete may have some predicate that filters out all records out of - * some partition(s), but plausible). For DP, we acquire locks very wide (all known partitions), - * but for most queries only a fraction will actually be updated. #addDynamicPartitions() tells - * us exactly which ones were written to. 
Thus using this trick to kill a query early for - * DP queries may be too restrictive. - */ - boolean isPartOfDynamicPartitionInsert = true; - try { - List locksBeingChecked = getLocksFromLockId(dbConn, extLockId); //being acquired now - response.setLockid(extLockId); - - //This is the set of entities that the statement represented by extLockId wants to update - List writeSet = new ArrayList<>(); - - for (LockInfo info : locksBeingChecked) { - if(!isPartOfDynamicPartitionInsert && info.type == LockType.SHARED_WRITE) { - writeSet.add(info); - } - } - if(!writeSet.isEmpty()) { - if(writeSet.get(0).txnId == 0) { - //Write operation always start a txn - throw new IllegalStateException("Found Write lock for " + JavaUtils.lockIdToString(extLockId) + " but no txnid"); - } - stmt = dbConn.createStatement(); - StringBuilder sb = new StringBuilder(" \"WS_DATABASE\", \"WS_TABLE\", \"WS_PARTITION\", " + - "\"WS_TXNID\", \"WS_COMMIT_ID\" " + - "FROM \"WRITE_SET\" WHERE WS_COMMIT_ID >= " + writeSet.get(0).txnId + " AND (");//see commitTxn() for more info on this inequality - for(LockInfo info : writeSet) { - sb.append("(\"WS_DATABASE\" = ").append(quoteString(info.db)).append(" AND \"WS_TABLE\" = ") - .append(quoteString(info.table)).append(" AND \"WS_PARTITION\" ") - .append(info.partition == null ? "IS NULL" : "= " + quoteString(info.partition)).append(") OR "); - } - sb.setLength(sb.length() - 4);//nuke trailing " or " - sb.append(")"); - //1 row is sufficient to know we have to kill the query - rs = stmt.executeQuery(sqlGenerator.addLimitClause(1, sb.toString())); - if(rs.next()) { - /** - * if here, it means we found an already committed txn which overlaps with the current one and - * it updated the same resource the current txn wants to update. By First-committer-wins - * rule, current txn will not be allowed to commit so may as well kill it now; This is just an - * optimization to prevent wasting cluster resources to run a query which is known to be DOA. - * {@link #commitTxn(CommitTxnRequest)} has the primary responsibility to ensure this. - * checkLock() runs at READ_COMMITTED so you could have another (Hive) txn running commitTxn() - * in parallel and thus writing to WRITE_SET. commitTxn() logic is properly mutexed to ensure - * that we don't "miss" any WW conflicts. We could've mutexed the checkLock() and commitTxn() - * as well but this reduces concurrency for very little gain. - * Note that update/delete (which runs as dynamic partition insert) acquires a lock on the table, - * but WRITE_SET has entries for actual partitions updated. Thus this optimization will "miss" - * the WW conflict but it will be caught in commitTxn() where actual partitions written are known. - * This is OK since we want 2 concurrent updates that update different sets of partitions to both commit. 
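The comment above ends the explanation of the early first-committer-wins check: before parking a SHARED_WRITE request in WAITING state, the removed checkLock() probed WRITE_SET for a committed transaction that had already updated the same db/table/partition and, if found, aborted the requester immediately. A hedged sketch of that probe is shown below; it uses plain JDBC against the WRITE_SET columns named in the removed query, while the class and method names are invented for illustration.

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

// Sketch of the first-committer-wins probe (illustrative names). Returns true when some
// already-committed txn wrote the same entity, i.e. the requesting txn is doomed to fail
// at commit time anyway and can be aborted now instead of wasting cluster resources.
public class WriteSetConflictSketch {

  public static boolean hasCommittedConflict(Connection metastoreDb, long requestingTxnId,
      String db, String table, String partition) throws SQLException {
    String sql = "SELECT 1 FROM \"WRITE_SET\""
        + " WHERE \"WS_COMMIT_ID\" >= ?"                    // overlaps the requesting txn
        + " AND \"WS_DATABASE\" = ? AND \"WS_TABLE\" = ?"
        + " AND " + (partition == null ? "\"WS_PARTITION\" IS NULL" : "\"WS_PARTITION\" = ?");
    try (PreparedStatement ps = metastoreDb.prepareStatement(sql)) {
      ps.setLong(1, requestingTxnId);
      ps.setString(2, db);
      ps.setString(3, table);
      if (partition != null) {
        ps.setString(4, partition);
      }
      try (ResultSet rs = ps.executeQuery()) {
        return rs.next();   // one overlapping row is enough to know we cannot win
      }
    }
  }
}
```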
- */ - String resourceName = rs.getString(1) + '/' + rs.getString(2); - String partName = rs.getString(3); - if(partName != null) { - resourceName += '/' + partName; - } - String msg = "Aborting " + JavaUtils.txnIdToString(writeSet.get(0).txnId) + - " since a concurrent committed transaction [" + JavaUtils.txnIdToString(rs.getLong(4)) + "," + rs.getLong(5) + - "] has already updated resource '" + resourceName + "'"; - LOG.info(msg); - if (abortTxns(dbConn, Collections.singletonList(writeSet.get(0).txnId), false, false, - TxnErrorMsg.ABORT_CONCURRENT) != 1) { - throw new IllegalStateException(msg + " FAILED!"); - } - dbConn.commit(); - throw new TxnAbortedException(msg); - } - close(rs, stmt, null); - } - - String queryStr = - " \"EX\".*, \"REQ\".\"HL_LOCK_INT_ID\" \"LOCK_INT_ID\", \"REQ\".\"HL_LOCK_TYPE\" \"LOCK_TYPE\" FROM (" + - " SELECT \"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\", \"HL_TXNID\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\"," + - " \"HL_LOCK_STATE\", \"HL_LOCK_TYPE\" FROM \"HIVE_LOCKS\"" + - " WHERE \"HL_LOCK_EXT_ID\" < " + extLockId + ") \"EX\"" + - " INNER JOIN (" + - " SELECT \"HL_LOCK_INT_ID\", \"HL_TXNID\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\"," + - " \"HL_LOCK_TYPE\" FROM \"HIVE_LOCKS\"" + - " WHERE \"HL_LOCK_EXT_ID\" = " + extLockId + ") \"REQ\"" + - " ON \"EX\".\"HL_DB\" = \"REQ\".\"HL_DB\"" + - " AND (\"EX\".\"HL_TABLE\" IS NULL OR \"REQ\".\"HL_TABLE\" IS NULL" + - " OR \"EX\".\"HL_TABLE\" = \"REQ\".\"HL_TABLE\"" + - " AND (\"EX\".\"HL_PARTITION\" IS NULL OR \"REQ\".\"HL_PARTITION\" IS NULL" + - " OR \"EX\".\"HL_PARTITION\" = \"REQ\".\"HL_PARTITION\"))" + - /*different locks from same txn should not conflict with each other, - txnId=0 means it's a select or IUD which does not write to ACID table*/ - " WHERE (\"REQ\".\"HL_TXNID\" = 0 OR \"EX\".\"HL_TXNID\" != \"REQ\".\"HL_TXNID\")" + - " AND "; - - /*EXCLUSIVE lock on partition should prevent SHARED_READ on the table, however there is no reason - for an EXCLUSIVE on a table to prevent SHARED_READ on a database. Similarly, EXCLUSIVE on a partition - should not conflict with SHARED_READ on a database. - SHARED_READ is usually acquired on a database to make sure it's not dropped, while some operation - is performed on that db (e.g. show tables, created table, etc). 
- EXCLUSIVE on an object may mean it's being dropped or overwritten.*/ - String[] whereStr = { - // shared-read - " \"REQ\".\"HL_LOCK_TYPE\"=" + LockTypeUtil.sharedRead() + " AND \"EX\".\"HL_LOCK_TYPE\"=" + - LockTypeUtil.exclusive() + " AND NOT (\"EX\".\"HL_TABLE\" IS NOT NULL AND \"REQ\".\"HL_TABLE\" IS NULL)", - // exclusive - " \"REQ\".\"HL_LOCK_TYPE\"=" + LockTypeUtil.exclusive() + - " AND NOT (\"EX\".\"HL_TABLE\" IS NULL AND \"EX\".\"HL_LOCK_TYPE\"=" + - LockTypeUtil.sharedRead() + " AND \"REQ\".\"HL_TABLE\" IS NOT NULL)", - // shared-write - " \"REQ\".\"HL_LOCK_TYPE\"=" + LockTypeUtil.sharedWrite() + " AND \"EX\".\"HL_LOCK_TYPE\" IN (" + - LockTypeUtil.exclWrite() + "," + LockTypeUtil.exclusive() + ")", - // excl-write - " \"REQ\".\"HL_LOCK_TYPE\"=" + LockTypeUtil.exclWrite() + " AND \"EX\".\"HL_LOCK_TYPE\"!=" + - LockTypeUtil.sharedRead() - }; - - List subQuery = new ArrayList<>(); - for (String subCond : whereStr) { - subQuery.add("(" + sqlGenerator.addLimitClause(1, queryStr + subCond) + ")"); - } - String query = String.join(" UNION ALL ", subQuery); - - stmt = dbConn.createStatement(); - LOG.debug("Going to execute query <{}>", query); - rs = stmt.executeQuery(query); - - if (rs.next()) { - // We acquire all locks for a given query atomically; if 1 blocks, all remain in Waiting state. - LockInfo blockedBy = new LockInfo(rs); - long intLockId = rs.getLong("LOCK_INT_ID"); - char lockChar = rs.getString("LOCK_TYPE").charAt(0); - if (LOG.isDebugEnabled()) { - LOG.debug("Failure to acquire lock({} intLockId:{} {}), blocked by ({})", JavaUtils.lockIdToString(extLockId), - intLockId, JavaUtils.txnIdToString(txnId), blockedBy); - } - - LockType lockType = LockTypeUtil.getLockTypeFromEncoding(lockChar) - .orElseThrow(() -> new MetaException("Unknown lock type: " + lockChar)); - - if ((zeroWaitReadEnabled && LockType.SHARED_READ == lockType || isExclusiveCTAS) && isValidTxn(txnId)) { - String cleanupQuery = "DELETE FROM \"HIVE_LOCKS\" WHERE \"HL_LOCK_EXT_ID\" = " + extLockId; - LOG.debug("Going to execute query: <{}>", cleanupQuery); - stmt.executeUpdate(cleanupQuery); - dbConn.commit(); - - response.setErrorMessage(String.format( - isExclusiveCTAS ? EXCL_CTAS_ERR_MSG : ZERO_WAIT_READ_ERR_MSG, blockedBy)); - response.setState(LockState.NOT_ACQUIRED); - return response; - } - String updateBlockedByQuery = "UPDATE \"HIVE_LOCKS\"" + - " SET \"HL_BLOCKEDBY_EXT_ID\" = " + blockedBy.extLockId + - ", \"HL_BLOCKEDBY_INT_ID\" = " + blockedBy.intLockId + - " WHERE \"HL_LOCK_EXT_ID\" = " + extLockId + " AND \"HL_LOCK_INT_ID\" = " + intLockId; - - LOG.debug("Going to execute query: <{}>", updateBlockedByQuery); - int updCnt = stmt.executeUpdate(updateBlockedByQuery); - - if (updCnt != 1) { - LOG.error("Failure to update lock (extLockId={}, intLockId={}) with the blocking lock's IDs " + - "(extLockId={}, intLockId={})", extLockId, intLockId, blockedBy.extLockId, blockedBy.intLockId); - shouldNeverHappen(txnId, extLockId, intLockId); - } - dbConn.commit(); - - response.setState(LockState.WAITING); - return response; - } - // If here, there were no locks that would block any item from 'locksBeingChecked' - acquire them all - acquire(dbConn, stmt, locksBeingChecked); + @Override + public CompactionResponse compact(CompactionRequest rqst) throws MetaException { + return new CompactFunction(rqst, openTxnTimeOutMillis, getMutexAPI()).execute(jdbcResource); + } - // We acquired all the locks, so commit and return acquired. 
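The four UNION ALL branches built just above encode which already-held lock types block each requested type. Ignoring the database-level special cases spelled out in the removed comments, that compatibility matrix can be summarized with the simplified sketch below; the enum and method are illustrative and are not Hive's LockTypeUtil.

```java
// Simplified compatibility check mirroring the removed UNION ALL conditions
// (database-level special cases from the removed comments are intentionally ignored).
public class LockCompatibilitySketch {

  enum LockKind { SHARED_READ, SHARED_WRITE, EXCL_WRITE, EXCLUSIVE }

  /** Does an already-held lock block a newly requested one on the same entity? */
  static boolean blocks(LockKind held, LockKind requested) {
    switch (requested) {
      case SHARED_READ:  return held == LockKind.EXCLUSIVE;
      case SHARED_WRITE: return held == LockKind.EXCL_WRITE || held == LockKind.EXCLUSIVE;
      case EXCL_WRITE:   return held != LockKind.SHARED_READ;
      case EXCLUSIVE:    return true;   // conflicts with every other lock
      default:           throw new IllegalArgumentException("Unknown lock type " + requested);
    }
  }

  public static void main(String[] args) {
    System.out.println(blocks(LockKind.SHARED_READ, LockKind.SHARED_WRITE)); // false
    System.out.println(blocks(LockKind.EXCL_WRITE, LockKind.SHARED_WRITE));  // true
  }
}
```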
- LOG.debug("Successfully acquired locks: {}", locksBeingChecked); - dbConn.commit(); - response.setState(LockState.ACQUIRED); - } finally { - close(rs, stmt, null); - } - return response; + @Override + public boolean submitForCleanup(CompactionRequest rqst, long highestWriteId, long txnId) throws MetaException { + // Put a compaction request in the queue. + long id = new GenerateCompactionQueueIdFunction().execute(jdbcResource); + jdbcResource.execute(new InsertCompactionRequestCommand(id, CompactionState.READY_FOR_CLEANING, rqst).withTxnDetails(highestWriteId, txnId)); + return true; } - private void acquire(Connection dbConn, Statement stmt, List locksBeingChecked) - throws SQLException, NoSuchLockException, MetaException { - if (locksBeingChecked == null || locksBeingChecked.isEmpty()) { - return; - } - long txnId = locksBeingChecked.get(0).txnId; - long extLockId = locksBeingChecked.get(0).extLockId; - String s = "UPDATE \"HIVE_LOCKS\" SET \"HL_LOCK_STATE\" = '" + LOCK_ACQUIRED + "', " + - //if lock is part of txn, heartbeat info is in txn record - "\"HL_LAST_HEARTBEAT\" = " + (isValidTxn(txnId) ? 0 : getEpochFn(dbProduct)) + - ",\"HL_ACQUIRED_AT\" = " + getEpochFn(dbProduct) + - ",\"HL_BLOCKEDBY_EXT_ID\"=NULL,\"HL_BLOCKEDBY_INT_ID\"=NULL" + - " WHERE \"HL_LOCK_EXT_ID\" = " + extLockId; - LOG.debug("Going to execute update <{}>", s); - int rc = stmt.executeUpdate(s); - if (rc < locksBeingChecked.size()) { - LOG.error("Failure to acquire all locks (acquired: {}, total needed: {}).", rc, locksBeingChecked.size()); - dbConn.rollback(); - /*select all locks for this ext ID and see which ones are missing*/ - String errorMsgTemplate = "No such lock(s): (%s: %s) %s"; - Set notFoundIds = locksBeingChecked.stream() - .map(lockInfo -> Long.toString(lockInfo.intLockId)) - .collect(Collectors.toSet()); - String getIntIdsQuery = "SELECT \"HL_LOCK_INT_ID\" FROM \"HIVE_LOCKS\" WHERE \"HL_LOCK_EXT_ID\" = " + extLockId; - LOG.debug("Going to execute query: <{}>", getIntIdsQuery); - try (ResultSet rs = stmt.executeQuery(getIntIdsQuery)) { - while (rs.next()) { - notFoundIds.remove(rs.getString(1)); - } - } - String errorMsg = String.format(errorMsgTemplate, - JavaUtils.lockIdToString(extLockId), String.join(", ", notFoundIds), JavaUtils.txnIdToString(txnId)); - throw new NoSuchLockException(errorMsg); - } + @Override + public ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaException { + return jdbcResource.execute(new ShowCompactHandler(rqst, sqlGenerator)); } /** - * Heartbeats on the lock table. This commits, so do not enter it with any state. - * Should not be called on a lock that belongs to transaction. + * We assume this is only called by metadata cache server to know if there are new base/delta files should be read. + * The query filters compactions by state and only returns SUCCEEDED or READY_FOR_CLEANING compactions because + * only these two states means there are new files ready to be read. 
*/ - private void heartbeatLock(Connection dbConn, long extLockId) - throws NoSuchLockException, SQLException, MetaException { - // If the lock id is 0, then there are no locks in this heartbeat - if (extLockId == 0) { - return; - } - try (Statement stmt = dbConn.createStatement()) { - String updateHeartbeatQuery = "UPDATE \"HIVE_LOCKS\" SET \"HL_LAST_HEARTBEAT\" = " + - getEpochFn(dbProduct) + " WHERE \"HL_LOCK_EXT_ID\" = " + extLockId; - LOG.debug("Going to execute update <{}>", updateHeartbeatQuery); - int rc = stmt.executeUpdate(updateHeartbeatQuery); - if (rc < 1) { - LOG.error("Failure to update last heartbeat for extLockId={}.", extLockId); - dbConn.rollback(); - throw new NoSuchLockException("No such lock: " + JavaUtils.lockIdToString(extLockId)); - } - LOG.debug("Successfully heartbeated for extLockId={}", extLockId); - dbConn.commit(); - } + @Override + public GetLatestCommittedCompactionInfoResponse getLatestCommittedCompactionInfo( + GetLatestCommittedCompactionInfoRequest rqst) throws MetaException { + return jdbcResource.execute(new GetLatestCommittedCompactionInfoHandler(rqst)); } - // Heartbeats on the txn table. This commits, so do not enter it with any state - private void heartbeatTxn(Connection dbConn, long txnid) - throws NoSuchTxnException, TxnAbortedException, SQLException, MetaException { - // If the txnid is 0, then there are no transactions in this heartbeat - if (txnid == 0) { - return; - } - try (Statement stmt = dbConn.createStatement()) { - String s = "UPDATE \"TXNS\" SET \"TXN_LAST_HEARTBEAT\" = " + getEpochFn(dbProduct) + - " WHERE \"TXN_ID\" = " + txnid + " AND \"TXN_STATE\" = " + TxnStatus.OPEN; - LOG.debug("Going to execute update <{}>", s); - int rc = stmt.executeUpdate(s); - if (rc < 1) { - ensureValidTxn(dbConn, txnid, stmt); // This should now throw some useful exception. - LOG.error("Can neither heartbeat txn (txnId={}) nor confirm it as invalid.", txnid); - dbConn.rollback(); - throw new NoSuchTxnException("No such txn: " + txnid); - } - LOG.debug("Successfully heartbeated for txnId={}", txnid); - dbConn.commit(); - } + @Override + public MetricsInfo getMetricsInfo() throws MetaException { + int threshold = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.METASTORE_ACIDMETRICS_TABLES_WITH_ABORTED_TXNS_THRESHOLD); + MetricsInfo metrics = jdbcResource.execute(MetricsInfoHandler.INSTANCE); + Set resourceNames = jdbcResource.execute(new TablesWithAbortedTxnsHandler(threshold)); + metrics.setTablesWithXAbortedTxnsCount(resourceNames.size()); + metrics.setTablesWithXAbortedTxns(resourceNames); + return metrics; } /** - * Returns the state of the transaction if it's able to determine it. Some cases where it cannot: - * 1. txnid was Aborted/Committed and then GC'd (compacted) - * 2. txnid was committed but it didn't modify anything (nothing in COMPLETED_TXN_COMPONENTS) + * Retry-by-caller note: + * This may be retried after dbConn.commit. At worst, it will create duplicate entries in + * TXN_COMPONENTS which won't affect anything. 
See more comments in {@link #commitTxn(CommitTxnRequest)} */ - private TxnStatus findTxnState(long txnid, Statement stmt) throws SQLException, MetaException { - String s = "SELECT \"TXN_STATE\" FROM \"TXNS\" WHERE \"TXN_ID\" = " + txnid; - LOG.debug("Going to execute query <{}>", s); - try (ResultSet rs = stmt.executeQuery(s)) { - if (!rs.next()) { - s = - sqlGenerator.addLimitClause(1, "1 FROM \"COMPLETED_TXN_COMPONENTS\" WHERE \"CTC_TXNID\" = " - + txnid); - LOG.debug("Going to execute query <{}>", s); - try (ResultSet rs2 = stmt.executeQuery(s)) { - if (rs2.next()) { - return TxnStatus.COMMITTED; - } - } - // could also check WRITE_SET but that seems overkill - return TxnStatus.UNKNOWN; - } - return TxnStatus.fromString(rs.getString(1)); - } + @Override + public void addDynamicPartitions(AddDynamicPartitions rqst) throws NoSuchTxnException, TxnAbortedException, MetaException { + TxnType txnType = jdbcResource.execute(new GetOpenTxnTypeAndLockHandler(sqlGenerator, rqst.getTxnid())); + if (txnType == null) { + //ensures txn is still there and in expected state + new EnsureValidTxnFunction(rqst.getTxnid()).execute(jdbcResource); + shouldNeverHappen(rqst.getTxnid()); + } + jdbcResource.execute(new InsertTxnComponentsCommand(rqst)); + jdbcResource.getJdbcTemplate().update("DELETE FROM \"TXN_COMPONENTS\" " + + "WHERE \"TC_TXNID\" = :txnId AND \"TC_DATABASE\" = :dbName AND \"TC_TABLE\" = :tableName AND \"TC_PARTITION\" IS NULL", + new MapSqlParameterSource() + .addValue("txnId", rqst.getTxnid()) + .addValue("dbName", org.apache.commons.lang3.StringUtils.lowerCase(rqst.getDbname())) + .addValue("tableName", org.apache.commons.lang3.StringUtils.lowerCase(rqst.getTablename()))); } /** - * Checks if all the txns in the list are in open state and not read-only. - * @param txnIds list of txns to be evaluated for open state/read-only status - * @param stmt db statement - * @return If all the txns in open state and not read-only, then return true else false + * Clean up corresponding records in metastore tables when corresponding object is dropped, + * specifically: TXN_COMPONENTS, COMPLETED_TXN_COMPONENTS, COMPACTION_QUEUE, COMPLETED_COMPACTIONS + * Retry-by-caller note: this is only idempotent assuming it's only called by dropTable/Db/etc + * operations. + *

+ * HIVE_LOCKS and WS_SET are cleaned up by {@link AcidHouseKeeperService}, if turned on */ - private boolean isTxnsOpenAndNotReadOnly(List txnIds, Statement stmt) throws SQLException { - List queries = new ArrayList<>(); - StringBuilder prefix = new StringBuilder(); - - // Get the count of txns from the given list that are in open state and not read-only. - // If the returned count is same as the input number of txns, then all txns are in open state and not read-only. - prefix.append("SELECT COUNT(*) FROM \"TXNS\" WHERE \"TXN_STATE\" = " + TxnStatus.OPEN - + " AND \"TXN_TYPE\" != " + TxnType.READ_ONLY.getValue() + " AND "); - - TxnUtils.buildQueryWithINClause(conf, queries, prefix, new StringBuilder(), - txnIds, "\"TXN_ID\"", false, false); - - long count = 0; - for (String query : queries) { - LOG.debug("Going to execute query <{}>", query); - try (ResultSet rs = stmt.executeQuery(query)) { - if (rs.next()) { - count += rs.getLong(1); - } - } - } - return count == txnIds.size(); + @Override + public void cleanupRecords(HiveObjectType type, Database db, Table table, + Iterator partitionIterator, boolean keepTxnToWriteIdMetaData) throws MetaException { + new CleanupRecordsFunction(type, db, table, partitionIterator, getDefaultCatalog(conf), keepTxnToWriteIdMetaData, null) + .execute(jdbcResource); } - /** - * Get txns from the list that are either aborted or read-only. - * @param txnIds list of txns to be evaluated for aborted state/read-only status - * @param stmt db statement - */ - private String getAbortedAndReadOnlyTxns(List txnIds, Statement stmt) throws SQLException { - List queries = new ArrayList<>(); - StringBuilder prefix = new StringBuilder(); - - // Check if any of the txns in the list are either aborted or read-only. - prefix.append("SELECT \"TXN_ID\", \"TXN_STATE\", \"TXN_TYPE\" FROM \"TXNS\" WHERE "); - TxnUtils.buildQueryWithINClause(conf, queries, prefix, new StringBuilder(), - txnIds, "\"TXN_ID\"", false, false); - StringBuilder txnInfo = new StringBuilder(); - - for (String query : queries) { - LOG.debug("Going to execute query <{}>", query); - try (ResultSet rs = stmt.executeQuery(query)) { - while (rs.next()) { - long txnId = rs.getLong(1); - TxnStatus txnState = TxnStatus.fromString(rs.getString(2)); - TxnType txnType = TxnType.findByValue(rs.getInt(3)); - - if (txnState != TxnStatus.OPEN) { - txnInfo.append("{").append(txnId).append(",").append(txnState).append("}"); - } else if (txnType == TxnType.READ_ONLY) { - txnInfo.append("{").append(txnId).append(",read-only}"); - } - } - } - } - return txnInfo.toString(); + @Override + public void cleanupRecords(HiveObjectType type, Database db, Table table, + Iterator partitionIterator, long txnId) throws MetaException { + new CleanupRecordsFunction(type, db, table, partitionIterator, getDefaultCatalog(conf), false, txnId) + .execute(jdbcResource); } - + /** - * Get txns from the list that are committed. - * @param txnIds list of txns to be evaluated for committed state - * @param stmt db statement + * Catalog hasn't been added to transactional tables yet, so it's passed in but not used. */ - private String getCommittedTxns(List txnIds, Statement stmt) throws SQLException { - List queries = new ArrayList<>(); - StringBuilder prefix = new StringBuilder(); - - // Check if any of the txns in the list are committed. 
- prefix.append("SELECT \"CTC_TXNID\" FROM \"COMPLETED_TXN_COMPONENTS\" WHERE "); - TxnUtils.buildQueryWithINClause(conf, queries, prefix, new StringBuilder(), - txnIds, "\"CTC_TXNID\"", false, false); - StringBuilder txnInfo = new StringBuilder(); - - for (String query : queries) { - LOG.debug("Going to execute query <{}>", query); - try (ResultSet rs = stmt.executeQuery(query)) { - while (rs.next()) { - long txnId = rs.getLong(1); - txnInfo.append("{").append(txnId).append(",c}"); - } - } - } - return txnInfo.toString(); + @Override + public void onRename(String oldCatName, String oldDbName, String oldTabName, String oldPartName, + String newCatName, String newDbName, String newTabName, String newPartName) + throws MetaException { + new OnRenameFunction(oldCatName, oldDbName, oldTabName, oldPartName, newCatName, newDbName, newTabName, newPartName).execute(jdbcResource); } - + /** - * Used to raise an informative error when the caller expected a txn in a particular TxnStatus - * but found it in some other status + * TODO: remove in future, for testing only, do not use. */ - private static void raiseTxnUnexpectedState(TxnStatus actualStatus, long txnid) - throws NoSuchTxnException, TxnAbortedException { - switch (actualStatus) { - case ABORTED: - throw new TxnAbortedException("Transaction " + JavaUtils.txnIdToString(txnid) + " already aborted"); - case COMMITTED: - throw new NoSuchTxnException("Transaction " + JavaUtils.txnIdToString(txnid) + " is already committed."); - case UNKNOWN: - throw new NoSuchTxnException("No such transaction " + JavaUtils.txnIdToString(txnid)); - case OPEN: - throw new NoSuchTxnException(JavaUtils.txnIdToString(txnid) + " is " + TxnStatus.OPEN); - default: - throw new IllegalArgumentException("Unknown TxnStatus " + actualStatus); - } + @VisibleForTesting + @Override + public int getNumLocks() { + return Objects.requireNonNull( + jdbcResource.getJdbcTemplate().queryForObject("SELECT COUNT(*) FROM \"HIVE_LOCKS\"", new MapSqlParameterSource(), Integer.TYPE), + "This never should be null, it's just to suppress warnings"); } + /** - * Returns the state of the transaction with {@code txnid} or throws if {@code raiseError} is true. + * TODO: remove in future, for testing only, do not use. 
*/ - private static void ensureValidTxn(Connection dbConn, long txnid, Statement stmt) - throws SQLException, NoSuchTxnException, TxnAbortedException { - // We need to check whether this transaction is valid and open - String s = "SELECT \"TXN_STATE\" FROM \"TXNS\" WHERE \"TXN_ID\" = " + txnid; - LOG.debug("Going to execute query <{}>", s); - try (ResultSet rs = stmt.executeQuery(s)) { - if (!rs.next()) { - // todo: add LIMIT 1 instead of count - should be more efficient - s = "SELECT COUNT(*) FROM \"COMPLETED_TXN_COMPONENTS\" WHERE \"CTC_TXNID\" = " + txnid; - try (ResultSet rs2 = stmt.executeQuery(s)) { - // todo: strictly speaking you can commit an empty txn, thus 2nd conjunct is wrong but - // only - // possible for for multi-stmt txns - boolean alreadyCommitted = rs2.next() && rs2.getInt(1) > 0; - LOG.debug("Going to rollback"); - rollbackDBConn(dbConn); - if (alreadyCommitted) { - // makes the message more informative - helps to find bugs in client code - throw new NoSuchTxnException("Transaction " + JavaUtils.txnIdToString(txnid) - + " is already committed."); - } - throw new NoSuchTxnException("No such transaction " + JavaUtils.txnIdToString(txnid)); - } - } - if (TxnStatus.fromString(rs.getString(1)) == TxnStatus.ABORTED) { - LOG.debug("Going to rollback"); - rollbackDBConn(dbConn); - throw new TxnAbortedException("Transaction " + JavaUtils.txnIdToString(txnid) - + " already aborted");// todo: add time of abort, which is not currently tracked. - // Requires schema change - } - } - } - - private Optional getLockFromLockId(Connection dbConn, long extLockId) throws MetaException, SQLException { - try (PreparedStatement pstmt = dbConn.prepareStatement(SELECT_LOCKS_FOR_LOCK_ID_QUERY)) { - pstmt.setLong(1, extLockId); - LOG.debug("Going to execute query <{}> for extLockId={}", SELECT_LOCKS_FOR_LOCK_ID_QUERY, extLockId); - try (ResultSet rs = pstmt.executeQuery()) { - if (!rs.next()) { - return Optional.empty(); - } - LockInfo info = new LockInfo(rs); - LOG.debug("getTxnIdFromLockId({}) Return {}", extLockId, JavaUtils.txnIdToString(info.txnId)); - return Optional.of(info); - } - } - } - - // NEVER call this function without first calling heartbeat(long, long) - private List getLocksFromLockId(Connection dbConn, long extLockId) throws MetaException, SQLException { - try (PreparedStatement pstmt = dbConn.prepareStatement(SELECT_LOCKS_FOR_LOCK_ID_QUERY)) { - List locks = new ArrayList<>(); - pstmt.setLong(1, extLockId); - LOG.debug("Going to execute query <{}> for extLockId={}", SELECT_LOCKS_FOR_LOCK_ID_QUERY, extLockId); - try (ResultSet rs = pstmt.executeQuery()) { - while (rs.next()) { - locks.add(new LockInfo(rs)); - } - } - if (locks.isEmpty()) { - throw new MetaException("This should never happen! We already " + - "checked the lock(" + JavaUtils.lockIdToString(extLockId) + ") existed but now we can't find it!"); - } - LOG.debug("Found {} locks for extLockId={}. Locks: {}", locks.size(), extLockId, locks); - return locks; - } + @VisibleForTesting + @Override + public long setTimeout(long milliseconds) { + long previous_timeout = timeout; + timeout = milliseconds; + return previous_timeout; } - // Clean time out locks from the database not associated with a transactions, i.e. locks - // for read-only autoCommit=true statements. This does a commit, - // and thus should be done before any calls to heartbeat that will leave - // open transactions. 
- private void timeOutLocks() { - //doing a SELECT first is less efficient but makes it easier to debug things - //when txnid is <> 0, the lock is associated with a txn and is handled by performTimeOuts() - //want to avoid expiring locks for a txn w/o expiring the txn itself + protected Connection getDbConn(int isolationLevel, DataSource connPool) throws SQLException { + Connection dbConn = null; try { - Set timedOutLockIds = new TreeSet<>( - jdbcResource.getJdbcTemplate().query(String.format(SELECT_TIMED_OUT_LOCKS_QUERY, getEpochFn(dbProduct)), - new MapSqlParameterSource().addValue("timeout", timeout), - (rs, rowNum) -> rs.getLong(1))); - if (timedOutLockIds.isEmpty()) { - LOG.debug("Did not find any timed-out locks, therefore retuning."); - return; + dbConn = connPool.getConnection(); + dbConn.setAutoCommit(false); + dbConn.setTransactionIsolation(isolationLevel); + return dbConn; + } catch (SQLException e) { + if (dbConn != null) { + dbConn.close(); } - - List queries = new ArrayList<>(); - StringBuilder prefix = new StringBuilder(); - StringBuilder suffix = new StringBuilder(); - - //include same hl_last_heartbeat condition in case someone heartbeated since the select - prefix.append("DELETE FROM \"HIVE_LOCKS\" WHERE \"HL_LAST_HEARTBEAT\" < "); - prefix.append(getEpochFn(dbProduct)).append("-").append(timeout); - prefix.append(" AND \"HL_TXNID\" = 0 AND "); - - TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, timedOutLockIds, - "\"HL_LOCK_EXT_ID\"", true, false); - - int deletedLocks = 0; - for (String query : queries) { - LOG.debug("Going to execute update: <{}>", query); - deletedLocks += jdbcResource.getJdbcTemplate().update(query, new MapSqlParameterSource()); - } - if (deletedLocks > 0) { - LOG.info("Deleted {} locks due to timed-out. Lock ids: {}", deletedLocks, timedOutLockIds); - } - } catch (Exception ex) { - LOG.error("Failed to purge timed-out locks: " + ex.getMessage(), ex); + throw e; } } @@ -5783,263 +959,22 @@ private void timeOutLocks() { * Will also delete locks which are not associated with a transaction and have timed out * Tries to keep transactions (against metastore db) small to reduce lock contention. */ - @RetrySemantics.Idempotent + @Override public void performTimeOuts() { - jdbcResource.bindDataSource(POOL_TX); - try (TransactionContext context = jdbcResource.getTransactionManager().getTransaction(PROPAGATION_REQUIRED)) { - //We currently commit after selecting the TXNS to abort. So whether SERIALIZABLE - //READ_COMMITTED, the effect is the same. We could use FOR UPDATE on Select from TXNS - //and do the whole performTimeOuts() in a single huge transaction, but the only benefit - //would be to make sure someone cannot heartbeat one of these txns at the same time. - //The attempt to heartbeat would block and fail immediately after it's unblocked. - //With current (RC + multiple txns) implementation it is possible for someone to send - //heartbeat at the very end of the expiry interval, and just after the Select from TXNS - //is made, in which case heartbeat will succeed but txn will still be Aborted. - //Solving this corner case is not worth the perf penalty. The client should heartbeat in a - //timely way. 
- timeOutLocks(); - while (true) { - String s = " \"TXN_ID\" FROM \"TXNS\" WHERE \"TXN_STATE\" = " + TxnStatus.OPEN + - " AND (" + - "\"TXN_TYPE\" != " + TxnType.REPL_CREATED.getValue() + - " AND \"TXN_LAST_HEARTBEAT\" < " + getEpochFn(dbProduct) + "-" + timeout + - " OR " + - " \"TXN_TYPE\" = " + TxnType.REPL_CREATED.getValue() + - " AND \"TXN_LAST_HEARTBEAT\" < " + getEpochFn(dbProduct) + "-" + replicationTxnTimeout + - ")"; - //safety valve for extreme cases - s = sqlGenerator.addLimitClause(10 * TIMED_OUT_TXN_ABORT_BATCH_SIZE, s); - - LOG.debug("Going to execute query <{}>", s); - List> timedOutTxns = jdbcResource.getJdbcTemplate().query(s, rs -> { - List> txnbatch = new ArrayList<>(); - List currentBatch = new ArrayList<>(TIMED_OUT_TXN_ABORT_BATCH_SIZE); - while (rs.next()) { - currentBatch.add(rs.getLong(1)); - if (currentBatch.size() == TIMED_OUT_TXN_ABORT_BATCH_SIZE) { - txnbatch.add(currentBatch); - currentBatch = new ArrayList<>(TIMED_OUT_TXN_ABORT_BATCH_SIZE); - } - } - if (currentBatch.size() > 0) { - txnbatch.add(currentBatch); - } - return txnbatch; - }); - //noinspection DataFlowIssue - if (timedOutTxns.size() == 0) { - jdbcResource.getTransactionManager().commit(context); - return; - } - - Object savePoint = context.getTransactionStatus().createSavepoint(); - - int numTxnsAborted = 0; - for (List batchToAbort : timedOutTxns) { - context.getTransactionStatus().releaseSavepoint(savePoint); - savePoint = context.getTransactionStatus().createSavepoint(); - if (abortTxns(jdbcResource.getConnection(), batchToAbort, true, false, false, TxnErrorMsg.ABORT_TIMEOUT) == batchToAbort.size()) { - numTxnsAborted += batchToAbort.size(); - //todo: add TXNS.COMMENT filed and set it to 'aborted by system due to timeout' - } else { - //could not abort all txns in this batch - this may happen because in parallel with this - //operation there was activity on one of the txns in this batch (commit/abort/heartbeat) - //This is not likely but may happen if client experiences long pause between heartbeats or - //unusually long/extreme pauses between heartbeat() calls and other logic in checkLock(), - //lock(), etc. 
- context.getTransactionStatus().rollbackToSavepoint(savePoint); - } - } - LOG.info("Aborted {} transaction(s) due to timeout", numTxnsAborted); - if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.METASTORE_ACIDMETRICS_EXT_ON)) { - Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_TIMED_OUT_TXNS).inc(numTxnsAborted); - } - } - } catch (MetaException | SQLException e) { - LOG.warn("Aborting timed out transactions failed due to " + e.getMessage(), e); - } finally { - jdbcResource.unbindDataSource(); - } + new PerformTimeoutsFunction(timeout, replicationTxnTimeout, transactionalListeners).execute(jdbcResource); } @Override - @RetrySemantics.ReadOnly public void countOpenTxns() throws MetaException { - Connection dbConn = null; - Statement stmt = null; - ResultSet rs = null; - try { - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - stmt = dbConn.createStatement(); - String s = "SELECT COUNT(*) FROM \"TXNS\" WHERE \"TXN_STATE\" = " + TxnStatus.OPEN; - LOG.debug("Going to execute query <{}>", s); - rs = stmt.executeQuery(s); - if (!rs.next()) { - LOG.error("Transaction database not properly configured, can't find txn_state from TXNS."); - } else { - Long numOpen = rs.getLong(1); - if (numOpen > Integer.MAX_VALUE) { - LOG.error("Open transaction count above {}, can't count that high!", Integer.MAX_VALUE); - } else { - numOpenTxns.set(numOpen.intValue()); - } - } - } catch (SQLException e) { - LOG.info("Failed to update number of open transactions"); - checkRetryable(e, "countOpenTxns()"); - } finally { - close(rs, stmt, dbConn); - } - } catch (RetryException e) { - countOpenTxns(); - } - } - - /** - * Add min history level entry for each generated txn record - * @param dbConn Connection - * @param txnIds new transaction ids - * @deprecated Remove this method when min_history_level table is dropped - * @throws SQLException ex - */ - @Deprecated - private void addTxnToMinHistoryLevel(Connection dbConn, List txnIds, long minOpenTxnId) throws SQLException { - if (!useMinHistoryLevel) { - return; - } - // Need to register minimum open txnid for current transactions into MIN_HISTORY table. - try (Statement stmt = dbConn.createStatement()) { - List rows = txnIds.stream().map(txnId -> txnId + ", " + minOpenTxnId).collect(Collectors.toList()); - - // Insert transaction entries into MIN_HISTORY_LEVEL. - List inserts = - sqlGenerator.createInsertValuesStmt("\"MIN_HISTORY_LEVEL\" (\"MHL_TXNID\", \"MHL_MIN_OPEN_TXNID\")", rows); - for (String insert : inserts) { - LOG.debug("Going to execute insert <{}>", insert); - stmt.execute(insert); - } - LOG.info("Added entries to MIN_HISTORY_LEVEL for current txns: ({}) with min_open_txn: {}", txnIds, minOpenTxnId); - } catch (SQLException e) { - if (dbProduct.isTableNotExistsError(e)) { - // If the table does not exists anymore, we disable the flag and start to work the new way - // This enables to switch to the new functionality without a restart - useMinHistoryLevel = false; - } else { - throw e; - } + int openTxns = jdbcResource.execute(new CountOpenTxnsHandler()); + if (openTxns > -1) { + numOpenTxns.set(openTxns); } } @Override - @RetrySemantics.SafeToRetry public void addWriteIdsToMinHistory(long txnid, Map minOpenWriteIds) throws MetaException { - if (!useMinHistoryWriteId) { - return; - } - // Need to register minimum open writeId for current transactions into MIN_HISTORY_WRITE_ID table. 
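The removed code that follows this comment used the standard JDBC addBatch()/executeBatch() pattern, flushing every maxBatchSize rows so that a txn touching many tables does not cost one round trip per write id. A generic sketch of that pattern is below; the table name and MH_TXNID come from the surrounding removed code, but the remaining column names and the "db.table" key format are assumptions for illustration only.

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.Map;

// Generic JDBC batching sketch matching the removed MIN_HISTORY_WRITE_ID insert loop.
// Assumes autoCommit=false, as in the removed code; column names beyond MH_TXNID are assumed.
public class MinHistoryBatchSketch {

  static void insertMinOpenWriteIds(Connection conn, long txnId,
      Map<String, Long> minOpenWriteIdByTable, int maxBatchSize) throws SQLException {
    String sql = "INSERT INTO \"MIN_HISTORY_WRITE_ID\""
        + " (\"MH_TXNID\", \"MH_DATABASE\", \"MH_TABLE\", \"MH_WRITEID\") VALUES (?, ?, ?, ?)";
    try (PreparedStatement ps = conn.prepareStatement(sql)) {
      int pending = 0;
      for (Map.Entry<String, Long> e : minOpenWriteIdByTable.entrySet()) {
        String[] dbAndTable = e.getKey().split("\\.", 2);   // keys assumed to be "db.table"
        ps.setLong(1, txnId);
        ps.setString(2, dbAndTable[0]);
        ps.setString(3, dbAndTable[1]);
        ps.setLong(4, e.getValue());
        ps.addBatch();
        if (++pending % maxBatchSize == 0) {
          ps.executeBatch();                                // flush a full batch
        }
      }
      if (pending % maxBatchSize != 0) {
        ps.executeBatch();                                  // flush the remainder
      }
      conn.commit();
    }
  }
}
```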
- try { - Connection dbConn = null; - try { - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - try (PreparedStatement pstmt = dbConn.prepareStatement(MIN_HISTORY_WRITE_ID_INSERT_QUERY)) { - int writeId = 0; - - for (Map.Entry validWriteId : minOpenWriteIds.entrySet()) { - String[] names = TxnUtils.getDbTableName(validWriteId.getKey()); - - pstmt.setLong(1, txnid); - pstmt.setString(2, names[0]); - pstmt.setString(3, names[1]); - pstmt.setLong(4, validWriteId.getValue()); - - pstmt.addBatch(); - writeId++; - if (writeId % maxBatchSize == 0) { - LOG.debug("Executing a batch of <" + TXN_TO_WRITE_ID_INSERT_QUERY + "> queries. " + - "Batch size: " + maxBatchSize); - pstmt.executeBatch(); - } - } - if (writeId % maxBatchSize != 0) { - LOG.debug("Executing a batch of <" + TXN_TO_WRITE_ID_INSERT_QUERY + "> queries. " + - "Batch size: " + writeId % maxBatchSize); - pstmt.executeBatch(); - } - } - dbConn.commit(); - LOG.info("Added entries to MIN_HISTORY_WRITE_ID for current txn: {} with min_open_write_ids: ({})", txnid, minOpenWriteIds); - } catch (SQLException e) { - if (dbProduct.isTableNotExistsError(e)) { - // If the table does not exists anymore, we disable the flag and start to work the new way - // This enables to switch to the new functionality without a restart - useMinHistoryWriteId = false; - } else { - LOG.error("Caught exception while storing minOpenWriteIds: ", e); - rollbackDBConn(dbConn); - checkRetryable(e, "addWriteIdsToMinHistory"); - throw new MetaException(e.getMessage()); - } - } finally { - closeDbConn(dbConn); - } - } catch (RetryException e) { - addWriteIdsToMinHistory(txnid, minOpenWriteIds); - } - } - - /** - * Remove txns from min_history_level table - * @param dbConn connection - * @param txnids transactions - * @deprecated Remove this method when min_history_level table is dropped - */ - @Deprecated - private void removeTxnsFromMinHistoryLevel(Connection dbConn, List txnids) throws SQLException { - if (!useMinHistoryLevel) { - return; - } - List queries = new ArrayList<>(); - StringBuilder prefix = new StringBuilder("DELETE FROM \"MIN_HISTORY_LEVEL\" WHERE "); - try (Statement stmt = dbConn.createStatement()) { - TxnUtils.buildQueryWithINClause(conf, queries, prefix, new StringBuilder(), txnids, "\"MHL_TXNID\"", false, false); - executeQueriesInBatchNoCount(dbProduct, stmt, queries, maxBatchSize); - LOG.info("Removed transactions: ({}) from MIN_HISTORY_LEVEL", txnids); - } catch (SQLException e) { - if (dbProduct.isTableNotExistsError(e)) { - // If the table does not exists anymore, we disable the flag and start to work the new way - // This enables to switch to the new functionality without a restart - useMinHistoryLevel = false; - } else { - throw e; - } - } - } - - /** - * Remove minOpenWriteIds from min_history_write_id tables - * @param dbConn connection - * @param txnids transactions - */ - private void removeWriteIdsFromMinHistory(Connection dbConn, List txnids) throws SQLException { - if (!useMinHistoryWriteId) { - return; - } - List queries = new ArrayList<>(); - StringBuilder prefix = new StringBuilder("DELETE FROM \"MIN_HISTORY_WRITE_ID\" WHERE "); - try (Statement stmt = dbConn.createStatement()) { - TxnUtils.buildQueryWithINClause(conf, queries, prefix, new StringBuilder(), txnids, "\"MH_TXNID\"", false, false); - executeQueriesInBatchNoCount(dbProduct, stmt, queries, maxBatchSize); - LOG.info("Removed transactions: ({}) from MIN_HISTORY_WRITE_ID", txnids); - } catch (SQLException e) { - if (dbProduct.isTableNotExistsError(e)) { - // If the 
table does not exists anymore, we disable the flag and start to work the new way - // This enables to switch to the new functionality without a restart - useMinHistoryWriteId = false; - } else { - throw e; - } - } + jdbcResource.execute(new AddWriteIdsToMinHistoryCommand(txnid, minOpenWriteIds)); } protected synchronized static DataSource setupJdbcConnectionPool(Configuration conf, int maxPoolSize) { @@ -6052,450 +987,154 @@ protected synchronized static DataSource setupJdbcConnectionPool(Configuration c throw new RuntimeException(e); } } else { - String connectionPooler = MetastoreConf.getVar(conf, ConfVars.CONNECTION_POOLING_TYPE).toLowerCase(); + String connectionPooler = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECTION_POOLING_TYPE).toLowerCase(); if ("none".equals(connectionPooler)) { LOG.info("Choosing not to pool JDBC connections"); - return new NoPoolConnectionPool(conf); + return new NoPoolConnectionPool(conf, dbProduct); } else { throw new RuntimeException("Unknown JDBC connection pooling " + connectionPooler); } } } - /** - * Returns true if {@code ex} should be retried - */ - static boolean isRetryable(Configuration conf, Exception ex) { - if(ex instanceof SQLException) { - SQLException sqlException = (SQLException)ex; - if (MANUAL_RETRY.equalsIgnoreCase(sqlException.getSQLState())) { - // Manual retry exception was thrown - return true; - } - if ("08S01".equalsIgnoreCase(sqlException.getSQLState())) { - //in MSSQL this means Communication Link Failure - return true; - } - if ("ORA-08176".equalsIgnoreCase(sqlException.getSQLState()) || - sqlException.getMessage().contains("consistent read failure; rollback data not available")) { - return true; - } - - String regex = MetastoreConf.getVar(conf, ConfVars.TXN_RETRYABLE_SQLEX_REGEX); - if (regex != null && !regex.isEmpty()) { - String[] patterns = regex.split(",(?=\\S)"); - String message = getMessage((SQLException)ex); - for (String p : patterns) { - if (Pattern.matches(p, message)) { - return true; - } - } - } - //see also https://issues.apache.org/jira/browse/HIVE-9938 - } - return false; - } - - private boolean isDuplicateKeyError(SQLException ex) { - return dbProduct.isDuplicateKeyError(ex); + @Override + public MutexAPI getMutexAPI() { + return mutexAPI; } - private static String getMessage(SQLException ex) { - return ex.getMessage() + " (SQLState=" + ex.getSQLState() + ", ErrorCode=" + ex.getErrorCode() + ")"; - } - static String quoteString(String input) { - return "'" + input + "'"; - } - static String quoteChar(char c) { - return "'" + c + "'"; + @Override + public LockHandle acquireLock(String key) throws MetaException { + return mutexAPI.acquireLock(key); } - /** - * {@link #lockInternal()} and {@link #unlockInternal()} are used to serialize those operations that require - * Select ... For Update to sequence operations properly. In practice that means when running - * with Derby database. See more notes at class level. 
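The removed javadoc above explains why lockInternal()/unlockInternal() exist: with Derby there is no usable SELECT ... FOR UPDATE, so the mutual exclusion other databases get from row locks is emulated with an in-process lock. A minimal sketch of that guard pattern follows, assuming the single-process Derby setup the comment describes; the class is illustrative, not the removed implementation.

```java
import java.util.concurrent.locks.ReentrantLock;
import java.util.function.Supplier;

// Sketch of the Derby-only guard described above: when the backing RDBMS cannot do
// SELECT ... FOR UPDATE, serialize the critical sections with an in-process lock instead.
public class DerbyMutexSketch {

  private final boolean isDerby;
  private final ReentrantLock guard = new ReentrantLock();

  DerbyMutexSketch(boolean isDerby) {
    this.isDerby = isDerby;
  }

  <T> T runSerialized(Supplier<T> criticalSection) {
    if (!isDerby) {
      return criticalSection.get();      // other databases rely on row locks instead
    }
    guard.lock();
    try {
      return criticalSection.get();
    } finally {
      guard.unlock();
    }
  }
}
```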
- */ - protected void lockInternal() { - if(dbProduct.isDERBY()) { - derbyLock.lock(); - } - } - protected void unlockInternal() { - if(dbProduct.isDERBY()) { - derbyLock.unlock(); - } - } @Override - @RetrySemantics.Idempotent - public MutexAPI getMutexAPI() { - return this; + public void acquireLock(String key, LockHandle handle) throws MetaException { + mutexAPI.acquireLock(key, handle); } @Override - public LockHandle acquireLock(String key) throws MetaException { - /** - * The implementation here is a bit kludgey but done so that code exercised by unit tests - * (which run against Derby which has no support for select for update) is as similar to - * production code as possible. - * In particular, with Derby we always run in a single process with a single metastore and - * the absence of For Update is handled via a Semaphore. The later would strictly speaking - * make the SQL statements below unnecessary (for Derby), but then they would not be tested. - */ - Connection dbConn = null; - Statement stmt = null; - ResultSet rs = null; - boolean needToCloseConn = true; - try { - try { - String sqlStmt = sqlGenerator.addForUpdateClause("SELECT \"MT_COMMENT\", \"MT_KEY2\" FROM \"AUX_TABLE\" WHERE \"MT_KEY1\"=" + quoteString(key)); - lockInternal(); - dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED, connPoolMutex); - stmt = dbConn.createStatement(); - LOG.debug("About to execute SQL: {}", sqlStmt); - rs = stmt.executeQuery(sqlStmt); - if (!rs.next()) { - close(rs); - try { - stmt.executeUpdate("INSERT INTO \"AUX_TABLE\" (\"MT_KEY1\", \"MT_KEY2\") VALUES(" + quoteString(key) + ", 0)"); - dbConn.commit(); - } catch (SQLException ex) { - if (!isDuplicateKeyError(ex)) { - throw new RuntimeException("Unable to lock " + quoteString(key) + " due to: " + getMessage(ex), ex); - } - //if here, it means a concrurrent acquireLock() inserted the 'key' - - //rollback is done for the benefit of Postgres which throws (SQLState=25P02, ErrorCode=0) if - //you attempt any stmt in a txn which had an error. - dbConn.rollback(); - } - rs = stmt.executeQuery(sqlStmt); - if (!rs.next()) { - throw new IllegalStateException("Unable to lock " + quoteString(key) + ". Expected row in AUX_TABLE is missing."); - } - } - Semaphore derbySemaphore = null; - if(dbProduct.isDERBY()) { - derbyKey2Lock.putIfAbsent(key, new Semaphore(1)); - derbySemaphore = derbyKey2Lock.get(key); - derbySemaphore.acquire(); - } - if (LOG.isDebugEnabled()) { - LOG.debug("{} locked by {}", quoteString(key), quoteString(TxnHandler.hostname)); - } - needToCloseConn = false; //The connection is good, we need not close it - //OK, so now we have a lock - return new LockHandleImpl(dbConn, stmt, rs, key, derbySemaphore); - } catch (SQLException ex) { - checkRetryable(ex, "acquireLock(" + key + ")"); - throw new MetaException("Unable to lock " + quoteString(key) + " due to: " + getMessage(ex) + "; " + StringUtils.stringifyException(ex)); - } - catch(InterruptedException ex) { - throw new MetaException("Unable to lock " + quoteString(key) + " due to: " + ex.getMessage() + StringUtils.stringifyException(ex)); - } - finally { - if (needToCloseConn) { - rollbackDBConn(dbConn); - close(rs, stmt, dbConn); - } - unlockInternal(); - } - } - catch(RetryException ex) { - return acquireLock(key); + public AbortCompactResponse abortCompactions(AbortCompactionRequest reqst) throws MetaException, NoSuchCompactionException { + if (reqst.getCompactionIds().isEmpty()) { + LOG.info("Compaction ids are missing in request. 
No compactions to abort"); + throw new NoSuchCompactionException("Compaction ids missing in request. No compactions to abort"); } + return new AbortCompactionFunction(reqst, sqlRetryHandler).execute(jdbcResource); } - @Override - public void acquireLock(String key, LockHandle handle) { - //the idea is that this will use LockHandle.dbConn - throw new NotImplementedException("acquireLock(String, LockHandle) is not implemented"); + private static void shouldNeverHappen(long txnid) { + throw new RuntimeException("This should never happen: " + JavaUtils.txnIdToString(txnid)); + } + + private void deleteInvalidOpenTransactions(List txnIds) throws MetaException { + try { + sqlRetryHandler.executeWithRetry(new SqlRetryCallProperties().withCallerId("deleteInvalidOpenTransactions"), + () -> { + jdbcResource.execute(new DeleteInvalidOpenTxnsCommand(txnIds)); + LOG.info("Removed transactions: ({}) from TXNS", txnIds); + jdbcResource.execute(new RemoveTxnsFromMinHistoryLevelCommand(txnIds)); + return null; + }); + } catch (TException e) { + throw new MetaException(e.getMessage()); + } } /** * Acquire the global txn lock, used to mutex the openTxn and commitTxn. * @param shared either SHARED_READ or EXCLUSIVE - * @throws SQLException */ - private void acquireTxnLock(Statement stmt, boolean shared) throws SQLException, MetaException { + private void acquireTxnLock(boolean shared) throws MetaException { String sqlStmt = sqlGenerator.createTxnLockStatement(shared); - stmt.execute(sqlStmt); - LOG.debug("TXN lock locked by {} in mode {}", quoteString(TxnHandler.hostname), shared); + jdbcResource.getJdbcTemplate().getJdbcTemplate().execute((Statement stmt) -> { + stmt.execute(sqlStmt); + return null; + }); + LOG.debug("TXN lock locked by '{}' in mode {}", TxnHandler.hostname, shared); } - private static final class LockHandleImpl implements LockHandle { - private final Connection dbConn; - private final Statement stmt; - private final ResultSet rs; - private final Semaphore derbySemaphore; - private final String key; - private final Long lastUpdateTime; - - LockHandleImpl(Connection conn, Statement stmt, ResultSet rs, String key, Semaphore derbySemaphore) { - this.dbConn = conn; - this.stmt = stmt; - this.rs = rs; - this.derbySemaphore = derbySemaphore; - if(derbySemaphore != null) { - //oterwise it may later release permit acquired by someone else - assert derbySemaphore.availablePermits() == 0 : "Expected locked Semaphore"; - } - this.key = key; - Long lastUpdateTime; - try { - lastUpdateTime = rs.getLong("MT_KEY2"); - } catch (SQLException e) { - LOG.warn("Couldn't resolve MT_KEY2 for MT_KEY1=" + quoteString(this.key), e); - lastUpdateTime = -1L; - } - this.lastUpdateTime = lastUpdateTime; - } - - @Override - public void releaseLocks() { - rollbackDBConn(dbConn); - TxnHandler.close(rs, stmt, dbConn); - if(derbySemaphore != null) { - derbySemaphore.release(); - } - if (LOG.isDebugEnabled()) { - LOG.debug("{} unlocked by {}", quoteString(key), quoteString(TxnHandler.hostname)); - } - } - - @Override - public Long getLastUpdateTime() { - return lastUpdateTime; - } + /** + * Determine the current time, using the RDBMS as a source of truth + * @return current time in milliseconds + * @throws org.apache.hadoop.hive.metastore.api.MetaException if the time cannot be determined + */ + protected Timestamp getDbTime() throws MetaException { + return jdbcResource.getJdbcTemplate().queryForObject( + dbProduct.getDBTime(), + new MapSqlParameterSource(), + (ResultSet rs, int rowNum) -> rs.getTimestamp(1)); + } - 
@Override - public void releaseLocks(Long timestamp) { - try { - stmt.executeUpdate("UPDATE \"AUX_TABLE\" SET \"MT_KEY2\" = "+ timestamp + " WHERE \"MT_KEY1\"=" + quoteString(key)); - dbConn.commit(); - } catch (SQLException ex) { - LOG.warn("Unable to update MT_KEY2 value for MT_KEY1=" + key, ex); - rollbackDBConn(dbConn); - } - TxnHandler.close(rs, stmt, dbConn); - if(derbySemaphore != null) { - derbySemaphore.release(); - } - if (LOG.isDebugEnabled()) { - LOG.debug("{} unlocked by {}", quoteString(key), quoteString(TxnHandler.hostname)); + private void determineDatabaseProduct(Connection conn) { + try { + String s = conn.getMetaData().getDatabaseProductName(); + dbProduct = DatabaseProduct.determineDatabaseProduct(s, conf); + if (dbProduct.isUNDEFINED()) { + String msg = "Unrecognized database product name <" + s + ">"; + LOG.error(msg); + throw new IllegalStateException(msg); } + } catch (SQLException e) { + String msg = "Unable to get database product name"; + LOG.error(msg, e); + throw new IllegalStateException(msg, e); } - - @Override - public void close() { - releaseLocks(); - } - } - - - private static class NoPoolConnectionPool implements DataSource { - // Note that this depends on the fact that no-one in this class calls anything but - // getConnection. If you want to use any of the Logger or wrap calls you'll have to - // implement them. - private final Configuration conf; - private Driver driver; - private String connString; - private String user; - private String passwd; - - public NoPoolConnectionPool(Configuration conf) { - this.conf = conf; + + private void initJdbcResource() { + if (jdbcResource == null) { + jdbcResource = new MultiDataSourceJdbcResource(dbProduct, conf, sqlGenerator); + jdbcResource.registerDataSource(POOL_TX, connPool); + jdbcResource.registerDataSource(POOL_MUTEX, connPoolMutex); + jdbcResource.registerDataSource(POOL_COMPACTOR, connPoolCompactor); } + } - @Override - public Connection getConnection() throws SQLException { - if (user == null) { - user = DataSourceProvider.getMetastoreJdbcUser(conf); - passwd = DataSourceProvider.getMetastoreJdbcPasswd(conf); - } - return getConnection(user, passwd); + /** + * Check if provided table is usable + * @return + */ + private boolean checkIfTableIsUsable(String tableName, boolean configValue) { + if (!configValue) { + // don't check it if disabled + return false; } - - @Override - public Connection getConnection(String username, String password) throws SQLException { - // Find the JDBC driver - if (driver == null) { - String driverName = MetastoreConf.getVar(conf, ConfVars.CONNECTION_DRIVER); - if (driverName == null || driverName.equals("")) { - String msg = "JDBC driver for transaction db not set in configuration " + - "file, need to set " + ConfVars.CONNECTION_DRIVER.getVarname(); - LOG.error(msg); - throw new RuntimeException(msg); - } - try { - LOG.info("Going to load JDBC driver {}", driverName); - driver = (Driver) Class.forName(driverName).newInstance(); - } catch (InstantiationException e) { - throw new RuntimeException("Unable to instantiate driver " + driverName + ", " + - e.getMessage(), e); - } catch (IllegalAccessException e) { + jdbcResource.bindDataSource(POOL_TX); + try { + jdbcResource.getJdbcTemplate().query("SELECT 1 FROM \"" + tableName + "\"", + new MapSqlParameterSource(), ResultSet::next); + } catch (DataAccessException e) { + LOG.debug("Catching sql exception in " + tableName + " check", e); + if (e.getCause() instanceof SQLException) { + if (dbProduct.isTableNotExistsError(e)) { + 
return false; + } else { throw new RuntimeException( - "Unable to access driver " + driverName + ", " + e.getMessage(), - e); - } catch (ClassNotFoundException e) { - throw new RuntimeException("Unable to find driver " + driverName + ", " + e.getMessage(), - e); - } - connString = MetastoreConf.getVar(conf, ConfVars.CONNECT_URL_KEY); - } - - try { - LOG.info("Connecting to transaction db with connection string {}", connString); - Properties connectionProps = new Properties(); - connectionProps.setProperty("user", username); - connectionProps.setProperty("password", password); - Connection conn = driver.connect(connString, connectionProps); - String prepareStmt = dbProduct != null ? dbProduct.getPrepareTxnStmt() : null; - if (prepareStmt != null) { - try (Statement stmt = conn.createStatement()) { - stmt.execute(prepareStmt); - } + "Unable to select from transaction database: " + SqlRetryHandler.getMessage(e) + StringUtils.stringifyException(e)); } - conn.setAutoCommit(false); - return conn; - } catch (SQLException e) { - throw new RuntimeException("Unable to connect to transaction manager using " + connString - + ", " + e.getMessage(), e); } + } finally { + jdbcResource.unbindDataSource(); } - - @Override - public PrintWriter getLogWriter() throws SQLException { - throw new UnsupportedOperationException(); - } - - @Override - public void setLogWriter(PrintWriter out) throws SQLException { - throw new UnsupportedOperationException(); - } - - @Override - public void setLoginTimeout(int seconds) throws SQLException { - throw new UnsupportedOperationException(); - } - - @Override - public int getLoginTimeout() throws SQLException { - throw new UnsupportedOperationException(); - } - - @Override - public java.util.logging.Logger getParentLogger() throws SQLFeatureNotSupportedException { - throw new UnsupportedOperationException(); - } - - @Override - public T unwrap(Class iface) throws SQLException { - throw new UnsupportedOperationException(); - } - - @Override - public boolean isWrapperFor(Class iface) throws SQLException { - throw new UnsupportedOperationException(); - } - } - - @Override - @RetrySemantics.SafeToRetry - public AbortCompactResponse abortCompactions(AbortCompactionRequest reqst) throws MetaException, NoSuchCompactionException { - Map abortCompactionResponseElements = new HashMap<>(); - AbortCompactResponse response = new AbortCompactResponse(new HashMap<>()); - response.setAbortedcompacts(abortCompactionResponseElements); - - List compactionIdsToAbort = reqst.getCompactionIds(); - if (compactionIdsToAbort.isEmpty()) { - LOG.info("Compaction ids are missing in request. No compactions to abort"); - throw new NoSuchCompactionException("Compaction ids missing in request. 
No compactions to abort"); - } - reqst.getCompactionIds().forEach(x -> abortCompactionResponseElements.put(x, getAbortCompactionResponseElement(x,"Error","No Such Compaction Id Available"))); - - List eligibleCompactionsToAbort = findEligibleCompactionsToAbort(abortCompactionResponseElements, - compactionIdsToAbort); - for (CompactionInfo compactionInfo : eligibleCompactionsToAbort) { - abortCompactionResponseElements.put(compactionInfo.id, abortCompaction(compactionInfo)); - } - return response; - } - - private AbortCompactionResponseElement getAbortCompactionResponseElement(long compactionId, String status, String message) { - AbortCompactionResponseElement resEle = new AbortCompactionResponseElement(compactionId); - resEle.setMessage(message); - resEle.setStatus(status); - return resEle; - } - - @RetrySemantics.SafeToRetry - private AbortCompactionResponseElement abortCompaction(CompactionInfo compactionInfo) throws MetaException { - SqlRetryFunction function = () -> { - jdbcResource.bindDataSource(POOL_TX); - try (TransactionContext context = jdbcResource.getTransactionManager().getTransaction(PROPAGATION_REQUIRED)) { - compactionInfo.state = TxnStore.ABORTED_STATE; - compactionInfo.errorMessage = "Compaction Aborted by Abort Comapction request."; - int updCount; - try { - updCount = jdbcResource.execute(new InsertCompactionInfoCommand(compactionInfo, getDbTime().getTime())); - } catch (Exception e) { - LOG.error("Unable to update compaction record: {}.", compactionInfo); - return getAbortCompactionResponseElement(compactionInfo.id, "Error", - "Error while aborting compaction:Unable to update compaction record in COMPLETED_COMPACTIONS"); - } - LOG.debug("Inserted {} entries into COMPLETED_COMPACTIONS", updCount); - try { - updCount = jdbcResource.getJdbcTemplate().update("DELETE FROM \"COMPACTION_QUEUE\" WHERE \"CQ_ID\" = :id", - new MapSqlParameterSource().addValue("id", compactionInfo.id)); - if (updCount != 1) { - LOG.error("Unable to update compaction record: {}. updCnt={}", compactionInfo, updCount); - return getAbortCompactionResponseElement(compactionInfo.id, "Error", - "Error while aborting compaction: Unable to update compaction record in COMPACTION_QUEUE"); - } else { - jdbcResource.getTransactionManager().commit(context); - return getAbortCompactionResponseElement(compactionInfo.id, "Success", - "Successfully aborted compaction"); - } - } catch (DataAccessException e) { - return getAbortCompactionResponseElement(compactionInfo.id, "Error", - "Error while aborting compaction:" + e.getMessage()); - } - } finally { - jdbcResource.unbindDataSource(); - } - }; - return sqlRetryHandler.executeWithRetry( - new SqlRetryCallProperties().withCallerId("abortCompaction(" + compactionInfo + ")"), function); + return true; } - - private List findEligibleCompactionsToAbort(Map abortCompactionResponseElements, List requestedCompId) throws MetaException { - List compactionInfoList = new ArrayList<>(); - String queryText = TxnQueries.SELECT_COMPACTION_QUEUE_BY_COMPID + " WHERE \"CC_ID\" IN (?) 
" ; - String sqlIN = requestedCompId.stream() - .map(x -> String.valueOf(x)) - .collect(Collectors.joining(",", "(", ")")); - queryText = queryText.replace("(?)", sqlIN); - try (Connection dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED); - Statement pStmt = dbConn.createStatement()) { - try (ResultSet rs = pStmt.executeQuery(queryText)) { - while (rs.next()) { - char compState = rs.getString(5).charAt(0); - long compID = rs.getLong(1); - if (CompactionState.INITIATED.equals(CompactionState.fromSqlConst(compState))) { - compactionInfoList.add(CompactionInfo.loadFullFromCompactionQueue(rs)); - } else { - abortCompactionResponseElements.put(compID, getAbortCompactionResponseElement(compID,"Error", - "Error while aborting compaction as compaction is in state-" + CompactionState.fromSqlConst(compState))); - } - } - } - } catch (SQLException e) { - throw new MetaException("Unable to select from transaction database-" + StringUtils.stringifyException(e)); + /** + * Returns the databases updated by txnId. + * Queries TXN_TO_WRITE_ID using txnId. + * + * @param txnId + * @throws MetaException + */ + private List getTxnDbsUpdated(long txnId) throws MetaException { + try { + return sqlRetryHandler.executeWithRetry( + new SqlRetryCallProperties().withCallerId("GetTxnDbsUpdatedHandler"), + () -> jdbcResource.execute(new GetTxnDbsUpdatedHandler(txnId))); + } catch (MetaException e) { + throw e; + } catch (TException e) { + throw new MetaException(e.getMessage()); } - return compactionInfoList; } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnLockManager.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnLockManager.java new file mode 100644 index 000000000000..e5f31f402124 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnLockManager.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.metastore.txn; + +import org.apache.hadoop.hive.common.classification.RetrySemantics; +import org.apache.hadoop.hive.metastore.api.LockRequest; +import org.apache.hadoop.hive.metastore.api.LockResponse; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchLockException; +import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; +import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; +import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; +import org.apache.hadoop.hive.metastore.api.TxnAbortedException; +import org.apache.hadoop.hive.metastore.api.TxnOpenException; +import org.apache.hadoop.hive.metastore.api.UnlockRequest; +import org.apache.hadoop.hive.metastore.txn.retry.SqlRetry; +import org.springframework.transaction.annotation.Transactional; + +import static org.apache.hadoop.hive.metastore.txn.TxnStore.POOL_TX; + +public interface TxnLockManager { + + @SqlRetry(lockInternally = true) + @Transactional(POOL_TX) + long enqueueLock(LockRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException; + + /** + * Check whether a lock has been obtained. This is used after {@link #enqueueLock(LockRequest)} returned a wait + * state. + * @param extLockId + * @param txnId Transaction id + * @param zeroWaitReadEnabled + * @param isExclusiveCTAS + * @return info on the state of the lock + * @throws NoSuchTxnException + * @throws NoSuchLockException + * @throws TxnAbortedException + * @throws MetaException + */ + @SqlRetry(lockInternally = true) + @Transactional(value = POOL_TX, noRollbackFor = {TxnAbortedException.class}) + LockResponse checkLock(long extLockId, long txnId, boolean zeroWaitReadEnabled, boolean isExclusiveCTAS) + throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException; + + /** + * Unlock a lock. It is not legal to call this if the caller is part of a txn. In that case + * the txn should be committed or aborted instead. (Note someday this will change since + * multi-statement transactions will allow unlocking in the transaction.) + * @param rqst lock to unlock + * @throws TxnOpenException + * @throws MetaException + */ + @SqlRetry + @Transactional(POOL_TX) + @RetrySemantics.Idempotent + void unlock(UnlockRequest rqst) + throws TxnOpenException, MetaException; + + /** + * Get information on current locks. + * @param rqst lock information to retrieve + * @return lock information. 
+ * @throws MetaException + */ + @SqlRetry + @Transactional(POOL_TX) + @RetrySemantics.ReadOnly + ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException; + +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java index bb1a5858191c..f31308ba3973 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java @@ -18,17 +18,15 @@ package org.apache.hadoop.hive.metastore.txn; import com.google.common.annotations.VisibleForTesting; - import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.common.classification.RetrySemantics; -import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; -import org.apache.hadoop.hive.metastore.api.NoSuchCompactionException; -import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest; import org.apache.hadoop.hive.metastore.api.AbortCompactResponse; import org.apache.hadoop.hive.metastore.api.AbortCompactionRequest; +import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; +import org.apache.hadoop.hive.metastore.api.AbortTxnsRequest; import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions; import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest; import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse; @@ -55,6 +53,7 @@ import org.apache.hadoop.hive.metastore.api.MaxAllocatedTableWriteIdRequest; import org.apache.hadoop.hive.metastore.api.MaxAllocatedTableWriteIdResponse; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchCompactionException; import org.apache.hadoop.hive.metastore.api.NoSuchLockException; import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; import org.apache.hadoop.hive.metastore.api.OpenTxnRequest; @@ -74,17 +73,21 @@ import org.apache.hadoop.hive.metastore.api.UnlockRequest; import org.apache.hadoop.hive.metastore.api.UpdateTransactionalStatsRequest; import org.apache.hadoop.hive.metastore.events.ListenerEvent; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo; +import org.apache.hadoop.hive.metastore.txn.entities.CompactionMetricsData; +import org.apache.hadoop.hive.metastore.txn.entities.MetricsInfo; import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource; -import org.apache.hadoop.hive.metastore.txn.retryhandling.SqlRetry; -import org.apache.hadoop.hive.metastore.txn.retryhandling.SqlRetryHandler; +import org.apache.hadoop.hive.metastore.txn.retry.SqlRetry; +import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryException; +import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryHandler; import org.springframework.transaction.annotation.Transactional; import java.sql.SQLException; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.Set; -import java.util.Map; /** * A handler to answer transaction related calls that come into the metastore @@ -157,23 +160,29 @@ enum MUTEX_KEY { * @return information about open transactions * @throws MetaException */ + @SqlRetry + @Transactional(POOL_TX) 
@RetrySemantics.ReadOnly GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException; /** * Get list of valid transactions. This gives just the list of transactions that are open. - * @return list of open transactions, as well as a high water mark. + * @return list of open transactions, as well as a high watermark. * @throws MetaException */ + @SqlRetry + @Transactional(POOL_TX) @RetrySemantics.ReadOnly GetOpenTxnsResponse getOpenTxns() throws MetaException; /** * Get list of valid transactions. This gives just the list of transactions that are open. * @param excludeTxnTypes : excludes this type of txns while getting the open txns - * @return list of open transactions, as well as a high water mark. + * @return list of open transactions, as well as a high watermark. * @throws MetaException */ + @SqlRetry + @Transactional(POOL_TX) @RetrySemantics.ReadOnly GetOpenTxnsResponse getOpenTxns(List excludeTxnTypes) throws MetaException; @@ -181,6 +190,8 @@ enum MUTEX_KEY { * Get the count for open transactions. * @throws MetaException */ + @SqlRetry + @Transactional(POOL_TX) @RetrySemantics.ReadOnly void countOpenTxns() throws MetaException; @@ -190,9 +201,13 @@ enum MUTEX_KEY { * @return information on opened transactions * @throws MetaException */ + @SqlRetry + @Transactional(value = POOL_TX, noRollbackFor = SqlRetryException.class) @RetrySemantics.Idempotent OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException; + @SqlRetry(lockInternally = true) + @Transactional(POOL_TX) @RetrySemantics.Idempotent long getTargetTxnId(String replPolicy, long sourceTxnId) throws MetaException; @@ -202,6 +217,8 @@ enum MUTEX_KEY { * @throws NoSuchTxnException * @throws MetaException */ + @SqlRetry(lockInternally = true) + @Transactional(POOL_TX) @RetrySemantics.Idempotent void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException; @@ -211,6 +228,8 @@ enum MUTEX_KEY { * @throws NoSuchTxnException * @throws MetaException */ + @SqlRetry(lockInternally = true) + @Transactional(POOL_TX) @RetrySemantics.Idempotent void abortTxns(AbortTxnsRequest rqst) throws NoSuchTxnException, MetaException; @@ -221,7 +240,9 @@ enum MUTEX_KEY { * @throws TxnAbortedException * @throws MetaException */ - @RetrySemantics.Idempotent + @SqlRetry(lockInternally = true) + @Transactional(value = POOL_TX, noRollbackFor = TxnAbortedException.class) + @RetrySemantics.Idempotent("No-op if already committed") void commitTxn(CommitTxnRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException; @@ -230,9 +251,12 @@ void commitTxn(CommitTxnRequest rqst) * @param rqst info on table/partitions and writeid snapshot to replicate. 
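Most hunks in this file add the same pair of annotations: @SqlRetry marks the method as retryable by the SQL retry handler, and @Transactional names the connection pool whose transaction the call runs in; the TransactionalRetryProxy wired up in TxnUtils later in this patch appears to interpret both. A hedged, illustrative declaration follows; the method itself is hypothetical and the comments are a reading of the annotations, not wording from the patch.

  @SqlRetry(lockInternally = true)   // retried on transient SQL failures; lockInternally presumably serializes retries via the internal mutex
  @Transactional(POOL_TX)            // executed in a transaction bound to the POOL_TX data source
  @RetrySemantics.Idempotent
  void exampleTxnOperation(long txnId) throws MetaException;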
* @throws MetaException in case of failure */ - @RetrySemantics.Idempotent + @SqlRetry(lockInternally = true) + @Transactional(POOL_TX) + @RetrySemantics.Idempotent("No-op if already replicated the writeid state") void replTableWriteIdState(ReplTblWriteIdStateRequest rqst) throws MetaException; + @Transactional(POOL_TX) void updateTransactionStatistics(UpdateTransactionalStatsRequest req) throws MetaException; /** @@ -243,6 +267,7 @@ void commitTxn(CommitTxnRequest rqst) * @param validTxnList valid transaction list for snapshot taken for current query * @throws MetaException */ + @Transactional(POOL_TX) @RetrySemantics.Idempotent Materialization getMaterializationInvalidationInfo( final CreationMetadata cm, final String validTxnList) @@ -252,27 +277,40 @@ Materialization getMaterializationInvalidationInfo( long getTxnIdForWriteId(String dbName, String tblName, long writeId) throws MetaException; + @SqlRetry + @Transactional(POOL_TX) + @RetrySemantics.ReadOnly long getLatestTxnIdInConflict(long txnid) throws MetaException; + @SqlRetry(lockInternally = true) + @Transactional(POOL_TX) LockResponse lockMaterializationRebuild(String dbName, String tableName, long txnId) throws MetaException; + @SqlRetry(lockInternally = true) + @Transactional(POOL_TX) boolean heartbeatLockMaterializationRebuild(String dbName, String tableName, long txnId) throws MetaException; + @SqlRetry(lockInternally = true) + @Transactional(POOL_TX) long cleanupMaterializationRebuildLocks(ValidTxnList validTxnList, long timeout) throws MetaException; - /** - * Gets the list of valid write ids for the given table wrt to current txn - * @param rqst info on transaction and list of table names associated with given transaction - * @throws NoSuchTxnException - * @throws MetaException - */ + /** + * Gets the list of valid write ids for the given table wrt to current txn + * @param rqst info on transaction and list of table names associated with given transaction + * @throws NoSuchTxnException + * @throws MetaException + */ + @SqlRetry + @Transactional(POOL_TX) @RetrySemantics.ReadOnly GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst) throws NoSuchTxnException, MetaException; + @SqlRetry + @Transactional(POOL_TX) @RetrySemantics.SafeToRetry void addWriteIdsToMinHistory(long txnId, Map minOpenWriteIds) throws MetaException; @@ -283,6 +321,8 @@ GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst) * @throws TxnAbortedException * @throws MetaException */ + @SqlRetry(lockInternally = true, retryOnDuplicateKey = true) + @Transactional(POOL_TX) AllocateTableWriteIdsResponse allocateTableWriteIds(AllocateTableWriteIdsRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException; @@ -291,6 +331,8 @@ AllocateTableWriteIdsResponse allocateTableWriteIds(AllocateTableWriteIdsRequest * @param rqst table for which the maximum writeId is requested * @return the maximum allocated writeId */ + @SqlRetry + @Transactional(POOL_TX) MaxAllocatedTableWriteIdResponse getMaxAllocatedTableWrited(MaxAllocatedTableWriteIdRequest rqst) throws MetaException; @@ -298,6 +340,8 @@ MaxAllocatedTableWriteIdResponse getMaxAllocatedTableWrited(MaxAllocatedTableWri * Called on conversion of existing table to full acid. Sets initial write ID to a high * enough value so that we can assign unique ROW__IDs to data in existing files. 
*/ + @SqlRetry + @Transactional(POOL_TX) void seedWriteId(SeedTableWriteIdsRequest rqst) throws MetaException; /** @@ -305,6 +349,8 @@ MaxAllocatedTableWriteIdResponse getMaxAllocatedTableWrited(MaxAllocatedTableWri * If the actual txnId is greater it will throw an exception. * @param rqst */ + @SqlRetry(lockInternally = true) + @Transactional(POOL_TX) void seedTxnId(SeedTxnIdRequest rqst) throws MetaException; /** @@ -330,6 +376,7 @@ LockResponse lock(LockRequest rqst) * @throws TxnAbortedException * @throws MetaException */ + @Transactional(POOL_TX) @RetrySemantics.SafeToRetry LockResponse checkLock(CheckLockRequest rqst) throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException; @@ -343,6 +390,7 @@ LockResponse checkLock(CheckLockRequest rqst) * @throws TxnOpenException * @throws MetaException */ + @Transactional(POOL_TX) @RetrySemantics.Idempotent void unlock(UnlockRequest rqst) throws NoSuchLockException, TxnOpenException, MetaException; @@ -353,6 +401,7 @@ void unlock(UnlockRequest rqst) * @return lock information. * @throws MetaException */ + @Transactional(POOL_TX) @RetrySemantics.ReadOnly ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException; @@ -364,6 +413,8 @@ void unlock(UnlockRequest rqst) * @throws TxnAbortedException * @throws MetaException */ + @SqlRetry + @Transactional(POOL_TX) @RetrySemantics.SafeToRetry void heartbeat(HeartbeatRequest ids) throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException; @@ -374,6 +425,8 @@ void heartbeat(HeartbeatRequest ids) * @return info on txns that were heartbeated * @throws MetaException */ + @SqlRetry + @Transactional(POOL_TX) @RetrySemantics.SafeToRetry HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst) throws MetaException; @@ -385,9 +438,13 @@ HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst) * @return id of the compaction that has been started or existing id if this resource is already scheduled * @throws MetaException */ + @SqlRetry + @Transactional(POOL_TX) @RetrySemantics.Idempotent CompactionResponse compact(CompactionRequest rqst) throws MetaException; + @SqlRetry + @Transactional(POOL_TX) @RetrySemantics.SafeToRetry boolean submitForCleanup(CompactionRequest rqst, long highestWriteId, long txnId) throws MetaException; @@ -397,6 +454,8 @@ HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst) * @return compaction information * @throws MetaException */ + @SqlRetry + @Transactional(POOL_TX) @RetrySemantics.ReadOnly ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaException; @@ -408,6 +467,7 @@ HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst) * @throws NoSuchCompactionException * @throws MetaException */ + @Transactional(POOL_TX) @RetrySemantics.Idempotent AbortCompactResponse abortCompactions(AbortCompactionRequest rqst) throws NoSuchCompactionException, MetaException; @@ -422,6 +482,8 @@ HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst) * partition specified by the request. 
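Since compact() above is documented to return the existing id when the resource is already scheduled, a short usage sketch may help; the database, table, and partition names are placeholders, and txnStore is assumed to be a TxnStore instance in scope.

  CompactionRequest rqst = new CompactionRequest("default", "acid_tbl", CompactionType.MAJOR);
  rqst.setPartitionname("p=1");                      // optional: target a single partition
  CompactionResponse resp = txnStore.compact(rqst);
  LOG.info("Compaction {} enqueued in state {}", resp.getId(), resp.getState());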
* @throws MetaException */ + @SqlRetry + @Transactional(POOL_TX) @RetrySemantics.ReadOnly GetLatestCommittedCompactionInfoResponse getLatestCommittedCompactionInfo( GetLatestCommittedCompactionInfoRequest rqst) throws MetaException; @@ -433,6 +495,8 @@ GetLatestCommittedCompactionInfoResponse getLatestCommittedCompactionInfo( * @throws TxnAbortedException * @throws MetaException */ + @SqlRetry(lockInternally = true) + @Transactional(POOL_TX) @RetrySemantics.SafeToRetry void addDynamicPartitions(AddDynamicPartitions rqst) throws NoSuchTxnException, TxnAbortedException, MetaException; @@ -445,20 +509,28 @@ void addDynamicPartitions(AddDynamicPartitions rqst) * @param partitionIterator partition iterator * @throws MetaException */ + @SqlRetry + @Transactional(POOL_TX) @RetrySemantics.Idempotent default void cleanupRecords(HiveObjectType type, Database db, Table table, Iterator partitionIterator) throws MetaException { cleanupRecords(type, db, table, partitionIterator, false); } + @SqlRetry + @Transactional(POOL_TX) @RetrySemantics.Idempotent void cleanupRecords(HiveObjectType type, Database db, Table table, Iterator partitionIterator, boolean keepTxnToWriteIdMetaData) throws MetaException; + @SqlRetry + @Transactional(POOL_TX) @RetrySemantics.Idempotent void cleanupRecords(HiveObjectType type, Database db, Table table, Iterator partitionIterator, long txnId) throws MetaException; + @SqlRetry + @Transactional(POOL_TX) @RetrySemantics.Idempotent void onRename(String oldCatName, String oldDbName, String oldTabName, String oldPartName, String newCatName, String newDbName, String newTabName, String newPartName) @@ -467,6 +539,7 @@ void onRename(String oldCatName, String oldDbName, String oldTabName, String old /** * Timeout transactions and/or locks. This should only be called by the compactor. */ + @Transactional(POOL_TX) @RetrySemantics.Idempotent void performTimeOuts(); @@ -712,6 +785,7 @@ Set findPotentialCompactions(int abortedThreshold, long abortedT * WriteSet tracking is used to ensure proper transaction isolation. This method deletes the * transaction metadata once it becomes unnecessary. */ + @Transactional(POOL_TX) @RetrySemantics.SafeToRetry void performWriteSetGC() throws MetaException; @@ -729,7 +803,8 @@ Set findPotentialCompactions(int abortedThreshold, long abortedT boolean checkFailedCompactions(CompactionInfo ci) throws MetaException; @VisibleForTesting - int numLocksInLockTable() throws SQLException, MetaException; + @Transactional(POOL_TX) + int getNumLocks() throws SQLException, MetaException; @VisibleForTesting long setTimeout(long milliseconds); @@ -755,6 +830,7 @@ interface MutexAPI { * The {@code key} is name of the lock. Will acquire an exclusive lock or block. It returns * a handle which must be used to release the lock. Each invocation returns a new handle. */ + @SqlRetry(lockInternally = true) LockHandle acquireLock(String key) throws MetaException; /** @@ -838,6 +914,8 @@ interface LockHandle extends AutoCloseable { * Returns ACID metadata related metrics info. 
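The MutexAPI contract above (acquireLock() hands back a fresh LockHandle that must be used to release the lock) is typically driven like the sketch below; the helper and the choice of the Initiator key are illustrative and not taken from this patch.

  static void runExclusively(TxnStore txnStore, Runnable work) throws MetaException {
    TxnStore.MutexAPI.LockHandle handle = txnStore.getMutexAPI()
        .acquireLock(TxnStore.MUTEX_KEY.Initiator.name());
    try {
      work.run();               // critical section, serialized via the AUX_TABLE row (or a Semaphore on Derby)
    } finally {
      handle.releaseLocks();    // always release; LockHandle also extends AutoCloseable
    }
  }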
* @return metrics info object */ + @SqlRetry + @Transactional(POOL_TX) @RetrySemantics.ReadOnly MetricsInfo getMetricsInfo() throws MetaException; @@ -855,7 +933,7 @@ interface LockHandle extends AutoCloseable { @SqlRetry @Transactional(POOL_COMPACTOR) CompactionMetricsData getCompactionMetricsData(String dbName, String tblName, String partitionName, - CompactionMetricsData.MetricType type) throws MetaException; + CompactionMetricsData.MetricType type) throws MetaException; /** * Remove records from the compaction metrics cache matching the filter criteria passed in as parameters diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStoreMutex.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStoreMutex.java new file mode 100644 index 000000000000..1013493a7913 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStoreMutex.java @@ -0,0 +1,209 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.txn; + +import org.apache.commons.lang3.NotImplementedException; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.tools.SQLGenerator; +import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource; +import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionContext; +import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryHandler; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; +import org.apache.hadoop.util.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.dao.DataAccessException; +import org.springframework.jdbc.core.namedparam.MapSqlParameterSource; + +import java.sql.SQLException; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.Semaphore; + +import static org.apache.hadoop.hive.metastore.txn.TxnStore.POOL_MUTEX; +import static org.springframework.transaction.TransactionDefinition.PROPAGATION_REQUIRED; + +public class TxnStoreMutex implements TxnStore.MutexAPI { + + private static final Logger LOG = LoggerFactory.getLogger(TxnStoreMutex.class); + /** + * must be static since even in UT there may be > 1 instance of TxnHandler + * (e.g. 
via Compactor services)
+ */
+ private final static ConcurrentHashMap<String, Semaphore> derbyKey2Lock = new ConcurrentHashMap<>();
+
+
+ private final SQLGenerator sqlGenerator;
+ private final MultiDataSourceJdbcResource jdbcResource;
+
+ public TxnStoreMutex(SQLGenerator sqlGenerator, MultiDataSourceJdbcResource jdbcResource) {
+ this.sqlGenerator = sqlGenerator;
+ this.jdbcResource = jdbcResource;
+ }
+
+ @Override
+ public LockHandle acquireLock(String key) throws MetaException {
+ /**
+ * The implementation here is a bit kludgey but done so that code exercised by unit tests
+ * (which run against Derby which has no support for select for update) is as similar to
+ * production code as possible.
+ * In particular, with Derby we always run in a single process with a single metastore and
+ * the absence of For Update is handled via a Semaphore. The latter would strictly speaking
+ * make the SQL statements below unnecessary (for Derby), but then they would not be tested.
+ */
+ TransactionContext context = null;
+ try {
+ jdbcResource.bindDataSource(POOL_MUTEX);
+ context = jdbcResource.getTransactionManager().getNewTransaction(PROPAGATION_REQUIRED);
+
+ MapSqlParameterSource paramSource = new MapSqlParameterSource().addValue("key", key);
+ String sqlStmt = sqlGenerator.addForUpdateClause("SELECT \"MT_COMMENT\", \"MT_KEY2\" FROM \"AUX_TABLE\" WHERE \"MT_KEY1\" = :key");
+
+ LOG.debug("About to execute SQL: {}", sqlStmt);
+
+ Long lastUpdateTime = jdbcResource.getJdbcTemplate().query(sqlStmt, paramSource, rs -> rs.next() ? rs.getLong("MT_KEY2") : null);
+ if (lastUpdateTime == null) {
+ try {
+ jdbcResource.getJdbcTemplate().update("INSERT INTO \"AUX_TABLE\" (\"MT_KEY1\", \"MT_KEY2\") VALUES(:key, 0)", paramSource);
+ context.createSavepoint();
+ } catch (DataAccessException e) {
+ if (!jdbcResource.getDatabaseProduct().isDuplicateKeyError(e)) {
+ throw new RuntimeException("Unable to lock " + key + " due to: " + SqlRetryHandler.getMessage(e), e);
+ }
+ //if here, it means a concurrent acquireLock() inserted the 'key'
+
+ //rollback is done for the benefit of Postgres which throws (SQLState=25P02, ErrorCode=0) if
+ //you attempt any stmt in a txn which had an error.
+ try {
+ jdbcResource.getConnection().rollback();
+ } catch (SQLException ex) {
+ throw new MetaException("Unable to lock " + key + " due to: " + SqlRetryHandler.getMessage(ex) + "; " + StringUtils.stringifyException(ex));
+ }
+ }
+ lastUpdateTime = jdbcResource.getJdbcTemplate().query(sqlStmt, paramSource, rs -> rs.next() ? rs.getLong("MT_KEY2") : null);
+ if (lastUpdateTime == null) {
+ throw new IllegalStateException("Unable to lock " + key + ". 
Expected row in AUX_TABLE is missing."); + } + } + Semaphore derbySemaphore = null; + if (jdbcResource.getDatabaseProduct().isDERBY()) { + derbyKey2Lock.putIfAbsent(key, new Semaphore(1)); + derbySemaphore = derbyKey2Lock.get(key); + derbySemaphore.acquire(); + } + if (LOG.isDebugEnabled()) { + LOG.debug("{} locked by {}", key, JavaUtils.hostname()); + } + //OK, so now we have a lock + return new LockHandleImpl(jdbcResource, context, key, lastUpdateTime, derbySemaphore); + } catch (InterruptedException ex) { + Thread.currentThread().interrupt(); + if (context != null) { + jdbcResource.getTransactionManager().rollback(context); + } + throw new MetaException("Unable to lock " + key + " due to: " + ex.getMessage() + StringUtils.stringifyException(ex)); + } catch (Throwable e) { + if (context != null) { + jdbcResource.getTransactionManager().rollback(context); + } + throw e; + } finally { + jdbcResource.unbindDataSource(); + } + } + + @Override + public void acquireLock(String key, LockHandle handle) throws MetaException { + //the idea is that this will use LockHandle.dbConn + throw new NotImplementedException("acquireLock(String, LockHandle) is not implemented"); + } + + public static final class LockHandleImpl implements LockHandle { + + private static final Logger LOG = LoggerFactory.getLogger(LockHandleImpl.class); + private static final String HOSTNAME = JavaUtils.hostname(); + + private final MultiDataSourceJdbcResource jdbcResource; + private final TransactionContext context; + private final Semaphore derbySemaphore; + private final String key; + private final Long lastUpdateTime; + + public LockHandleImpl(MultiDataSourceJdbcResource jdbcResource, TransactionContext context, String key, + Long lastUpdateTime, Semaphore derbySemaphore) { + assert derbySemaphore == null || derbySemaphore.availablePermits() == 0 : "Expected locked Semaphore"; + + this.jdbcResource = jdbcResource; + this.context = context; + this.derbySemaphore = derbySemaphore; + this.key = key; + this.lastUpdateTime = lastUpdateTime == null ? 
-1L : lastUpdateTime; + } + + @Override + public void releaseLocks() { + try { + jdbcResource.bindDataSource(POOL_MUTEX); + jdbcResource.getTransactionManager().rollback(context); + if (derbySemaphore != null) { + derbySemaphore.release(); + } + if (LOG.isDebugEnabled()) { + LOG.debug("{} unlocked by {}", key, HOSTNAME); + } + } finally { + jdbcResource.unbindDataSource(); + } + } + + @Override + public Long getLastUpdateTime() { + return lastUpdateTime; + } + + @Override + public void releaseLocks(Long timestamp) { + try { + jdbcResource.bindDataSource(POOL_MUTEX); + try { + jdbcResource.getJdbcTemplate().update("UPDATE \"AUX_TABLE\" SET \"MT_KEY2\" = :time WHERE \"MT_KEY1\"= :key", + new MapSqlParameterSource() + .addValue("time", timestamp) + .addValue("key", key)); + jdbcResource.getTransactionManager().commit(context); + } catch (DataAccessException ex) { + LOG.warn("Unable to update MT_KEY2 value for MT_KEY1=" + key, ex); + } + + if (derbySemaphore != null) { + derbySemaphore.release(); + } + if (LOG.isDebugEnabled()) { + LOG.debug("{} unlocked by {}", key, HOSTNAME); + } + } finally { + jdbcResource.unbindDataSource(); + } + } + + @Override + public void close() { + releaseLocks(); + } + + } +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java index 35a8ab71cc8e..f490798be56a 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.metastore.txn; +import com.google.common.base.Splitter; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; import org.apache.hadoop.fs.FileSystem; @@ -28,18 +29,20 @@ import org.apache.hadoop.hive.metastore.api.CompactionType; import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TableValidWriteIds; +import org.apache.hadoop.hive.metastore.api.TxnAbortedException; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; +import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus; +import org.apache.hadoop.hive.metastore.utils.FileUtils; import org.apache.hadoop.hive.metastore.utils.JavaUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.springframework.jdbc.core.namedparam.MapSqlParameterSource; -import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate; import java.io.IOException; import java.security.PrivilegedExceptionAction; @@ -50,10 +53,9 @@ import java.util.Arrays; import java.util.BitSet; import java.util.Collection; -import java.util.Comparator; import java.util.List; import java.util.Map; -import java.util.regex.Pattern; +import java.util.concurrent.ThreadLocalRandom; import java.util.stream.Collectors; import static org.apache.hadoop.hive.common.AcidConstants.SOFT_DELETE_TABLE; @@ -147,7 +149,7 @@ public 
static TxnStore getTxnStore(Configuration conf) { try { TxnStore handler = JavaUtils.getClass(className, TxnStore.class).newInstance(); handler.setConf(conf); - handler = ProxyTxnHandler.getProxy(handler, handler.getRetryHandler(), handler.getJdbcResourceHolder()); + handler = TransactionalRetryProxy.getProxy(handler.getRetryHandler(), handler.getJdbcResourceHolder(), handler); return handler; } catch (Exception e) { LOG.error("Unable to instantiate raw store directly in fastpath mode", e); @@ -410,50 +412,6 @@ public static List buildQueryWithINClauseStrings(Configuration conf, Li return ret; } - /** - * Executes the statement with an IN clause. If the number of elements or the length of the constructed statement would be - * too big, the IN clause will be split into multiple smaller ranges, and the statement will be executed multiple times. - * @param conf Hive configuration used to get the query and IN clause length limits. - * @param jdbcTemplate The {@link NamedParameterJdbcTemplate} instance to used for statement execution. - * @param query The query with the IN clause - * @param params A {@link MapSqlParameterSource} instance with the parameters of the query - * @param inClauseParamName The name of the parameter representing the content of the IN clause - * @param elements A {@link List} containing the elements to put in the IN clause - * @param comparator A {@link Comparator} instance used to find the longest element in the list. Used to - * estimate the length of the query. - * @return Returns the total number of affected rows. - * @param Type of the elements in the list. - */ - public static int executeStatementWithInClause(Configuration conf, NamedParameterJdbcTemplate jdbcTemplate, - String query, MapSqlParameterSource params, String inClauseParamName, - List elements, Comparator comparator) { - if (elements.size() == 0) { - throw new IllegalArgumentException("The elements list cannot be empty! An empty IN clause is invalid!"); - } - if (!Pattern.compile("IN\\s*\\(\\s*:" + inClauseParamName + "\\s*\\)", Pattern.CASE_INSENSITIVE).matcher(query).find()) { - throw new IllegalArgumentException("The query must contain the IN(:" + inClauseParamName + ") clause!"); - } - - int maxQueryLength = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH) * 1024; - int batchSize = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE); - // The length of a single element is the string length of the longest element + 2 characters (comma, space) - int elementLength = elements.stream().max(comparator).get().toString().length() + 2; - // estimated base query size: query size + the length of all parameters. - int baseQuerySize = query.length() + params.getValues().values().stream().mapToInt(s -> s.toString().length()).sum(); - int maxElementsByLength = (maxQueryLength - baseQuerySize) / elementLength; - - int inClauseMaxSize = Math.min(batchSize, maxElementsByLength); - - int fromIndex = 0, totalCount = 0; - while (fromIndex < elements.size()) { - int endIndex = Math.min(elements.size(), fromIndex + inClauseMaxSize); - params.addValue(inClauseParamName, elements.subList(fromIndex, endIndex)); - totalCount += jdbcTemplate.update(query, params); - fromIndex = endIndex; - } - return totalCount; - } - /** * Compute and return the size of a query statement with the given parameters as input variables. * @@ -666,4 +624,76 @@ public static Character thriftCompactionType2DbType(CompactionType ct) throws Me public static String nvl(String input) { return input != null ? 
" = ? " : " IS NULL "; } + + public static String normalizePartitionCase(String s) { + if (s == null) { + return null; + } + Map map = Splitter.on(Path.SEPARATOR).withKeyValueSeparator('=').split(s); + return FileUtils.makePartName(new ArrayList<>(map.keySet()), new ArrayList<>(map.values())); + } + + @SuppressWarnings("squid:S2245") + public static long generateTemporaryId() { + return -1 * ThreadLocalRandom.current().nextLong(); + } + + public static boolean isValidTxn(long txnId) { + return txnId != 0; + } + + /** + * Used to raise an informative error when the caller expected a txn in a particular TxnStatus + * but found it in some other status + */ + public static void raiseTxnUnexpectedState(TxnStatus actualStatus, long txnid) + throws NoSuchTxnException, TxnAbortedException { + switch (actualStatus) { + case ABORTED: + throw new TxnAbortedException("Transaction " + JavaUtils.txnIdToString(txnid) + " already aborted"); + case COMMITTED: + throw new NoSuchTxnException("Transaction " + JavaUtils.txnIdToString(txnid) + " is already committed."); + case UNKNOWN: + throw new NoSuchTxnException("No such transaction " + JavaUtils.txnIdToString(txnid)); + case OPEN: + throw new NoSuchTxnException(JavaUtils.txnIdToString(txnid) + " is " + TxnStatus.OPEN); + default: + throw new IllegalArgumentException("Unknown TxnStatus " + actualStatus); + } + } + + /** + * Checks is the passed exception, or any of the root (cause) exceptions are an instance of {@link SQLException}. + * Returns with the found {@link SQLException} or throws an {@link IllegalArgumentException} if no {@link SQLException} + * found in the chain. + * @param ex The exception to check + * @return Returns with the {@link SQLException} found in the exception chain. + * @throws IllegalArgumentException Thrown if there is no {@link SQLException} in the exception chain + */ + public static SQLException getSqlException(Throwable ex) throws IllegalArgumentException { + while (ex != null && !(ex instanceof SQLException) ) { + ex = ex.getCause(); + } + if (ex == null) { + throw new IllegalArgumentException("No SQLException found in the exception chain!"); + } + return (SQLException)ex; + } + + public static String createUpdatePreparedStmt(String tableName, List columnNames, List conditionKeys) { + StringBuilder sb = new StringBuilder(); + sb.append("update " + tableName + " set "); + sb.append(columnNames.stream().map(col -> col + "=?").collect(Collectors.joining(","))); + sb.append(" where " + conditionKeys.stream().map(cond -> cond + "=?").collect(Collectors.joining(" and "))); + return sb.toString(); + } + + public static String createInsertPreparedStmt(String tableName, List columnNames) { + StringBuilder sb = new StringBuilder(); + sb.append("insert into " + tableName + "("); + sb.append(columnNames.stream().collect(Collectors.joining(","))); + String placeholder = columnNames.stream().map(col -> "?").collect(Collectors.joining(",")); + sb.append(") values (" + placeholder + ")"); + return sb.toString(); + } } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionInfo.java similarity index 98% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionInfo.java index 
40b32482cf93..ca8dff5b8f22 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionInfo.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionInfo.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.txn; +package org.apache.hadoop.hive.metastore.txn.entities; import org.apache.commons.lang3.builder.ToStringBuilder; import org.apache.hadoop.hive.common.ValidCompactorWriteIdList; @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.OptionalCompactionInfoStruct; import org.apache.hadoop.hive.metastore.api.TableValidWriteIds; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; import org.apache.hadoop.hive.metastore.utils.StringableMap; import java.sql.PreparedStatement; @@ -208,7 +209,7 @@ public boolean equals(Object obj) { * @param rs ResultSet after call to rs.next() * @throws SQLException */ - static CompactionInfo loadFullFromCompactionQueue(ResultSet rs) throws SQLException, MetaException { + public static CompactionInfo loadFullFromCompactionQueue(ResultSet rs) throws SQLException { CompactionInfo fullCi = new CompactionInfo(); fullCi.id = rs.getLong(1); fullCi.dbname = rs.getString(2); diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionMetricsData.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionMetricsData.java similarity index 98% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionMetricsData.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionMetricsData.java index b56009563b47..33dd362307d4 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionMetricsData.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionMetricsData.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.txn; +package org.apache.hadoop.hive.metastore.txn.entities; public class CompactionMetricsData { diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionState.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionState.java similarity index 81% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionState.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionState.java index b0b2d0cd66ea..fdafd3fe259d 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionState.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/CompactionState.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
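The CompactionState hunk below pairs each one-character SQL constant with a human-readable message. For orientation, the existing lookup path from the database column to the enum works roughly as in the following two lines; the column name and result-set handling are illustrative.

  char dbValue = rs.getString("CC_STATE").charAt(0);              // e.g. 'r' as stored in the metastore tables
  CompactionState state = CompactionState.fromSqlConst(dbValue);  // resolves via the LOOKUP map to READY_FOR_CLEANING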
*/ -package org.apache.hadoop.hive.metastore.txn; +package org.apache.hadoop.hive.metastore.txn.entities; import java.util.Arrays; import java.util.Map; @@ -28,22 +28,24 @@ * These are the valid values for Compaction states. */ public enum CompactionState { - INITIATED('i'), - WORKING('w'), - READY_FOR_CLEANING('r'), - FAILED('f'), - SUCCEEDED('s'), - DID_NOT_INITIATE('a'), - REFUSED('c'), - ABORTED('x'); + INITIATED('i', "initiated"), + WORKING('w', "working"), + READY_FOR_CLEANING('r', "ready for cleaning"), + FAILED('f', "failed"), + SUCCEEDED('s', "succeeded"), + DID_NOT_INITIATE('a', "did not initiate"), + REFUSED('c', "refused"), + ABORTED('x', "aborted"); + private final String message; private final char sqlConst; private static final Map LOOKUP = Arrays.stream(CompactionState.values()).collect(toMap(CompactionState::getSqlConst, identity())); - CompactionState(char sqlConst) { + CompactionState(char sqlConst, String message) { this.sqlConst = sqlConst; + this.message = message; } @Override diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/LockInfo.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/LockInfo.java new file mode 100644 index 000000000000..a7550b6fed78 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/LockInfo.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.txn.entities; + +import org.apache.commons.lang3.builder.HashCodeBuilder; +import org.apache.hadoop.hive.metastore.api.LockState; +import org.apache.hadoop.hive.metastore.api.LockType; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement; +import org.apache.hadoop.hive.metastore.utils.JavaUtils; +import org.apache.hadoop.hive.metastore.utils.LockTypeUtil; + +import java.sql.ResultSet; +import java.sql.SQLException; + +public class LockInfo { + + public static final char LOCK_ACQUIRED = 'a'; + public static final char LOCK_WAITING = 'w'; + + + private final long extLockId; + private final long intLockId; + //0 means there is no transaction, i.e. 
it a select statement which is not part of + //explicit transaction or a IUD statement that is not writing to ACID table + private final long txnId; + private final String db; + private final String table; + private final String partition; + private final LockState state; + private final LockType type; + + // Assumes the result set is set to a valid row + public LockInfo(ResultSet rs) throws SQLException, MetaException { + extLockId = rs.getLong("HL_LOCK_EXT_ID"); // can't be null + intLockId = rs.getLong("HL_LOCK_INT_ID"); // can't be null + db = rs.getString("HL_DB"); // can't be null + String t = rs.getString("HL_TABLE"); + table = (rs.wasNull() ? null : t); + String p = rs.getString("HL_PARTITION"); + partition = (rs.wasNull() ? null : p); + switch (rs.getString("HL_LOCK_STATE").charAt(0)) { + case LOCK_WAITING: state = LockState.WAITING; break; + case LOCK_ACQUIRED: state = LockState.ACQUIRED; break; + default: + throw new MetaException("Unknown lock state " + rs.getString("HL_LOCK_STATE").charAt(0)); + } + char lockChar = rs.getString("HL_LOCK_TYPE").charAt(0); + type = LockTypeUtil.getLockTypeFromEncoding(lockChar) + .orElseThrow(() -> new MetaException("Unknown lock type: " + lockChar)); + txnId = rs.getLong("HL_TXNID"); //returns 0 if value is NULL + } + + public LockInfo(ShowLocksResponseElement e) { + extLockId = e.getLockid(); + intLockId = e.getLockIdInternal(); + txnId = e.getTxnid(); + db = e.getDbname(); + table = e.getTablename(); + partition = e.getPartname(); + state = e.getState(); + type = e.getType(); + } + + public long getExtLockId() { + return extLockId; + } + + public long getIntLockId() { + return intLockId; + } + + public long getTxnId() { + return txnId; + } + + public String getDb() { + return db; + } + + public String getTable() { + return table; + } + + public String getPartition() { + return partition; + } + + public LockState getState() { + return state; + } + + public LockType getType() { + return type; + } + + public boolean equals(Object other) { + if (!(other instanceof LockInfo)) return false; + LockInfo o = (LockInfo)other; + // Lock ids are unique across the system. + return extLockId == o.extLockId && intLockId == o.intLockId; + } + + @Override + public int hashCode() { + return new HashCodeBuilder() + .append(intLockId) + .append(extLockId) + .append(txnId) + .append(db) + .build(); + } + + @Override + public String toString() { + return JavaUtils.lockIdToString(extLockId) + " intLockId:" + + intLockId + " " + JavaUtils.txnIdToString(txnId) + + " db:" + db + " table:" + table + " partition:" + + partition + " state:" + (state == null ? "null" : state.toString()) + + " type:" + (type == null ? 
"null" : type.toString()); + } + private boolean isDbLock() { + return db != null && table == null && partition == null; + } + private boolean isTableLock() { + return db != null && table != null && partition == null; + } + private boolean isPartitionLock() { + return !(isDbLock() || isTableLock()); + } + +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/MetricsInfo.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/MetricsInfo.java similarity index 98% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/MetricsInfo.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/MetricsInfo.java index 8fc7d8fed0cc..b76a5d1070ca 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/MetricsInfo.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/MetricsInfo.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.txn; +package org.apache.hadoop.hive.metastore.txn.entities; import java.util.Set; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/OpenTxn.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/OpenTxn.java similarity index 84% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/OpenTxn.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/OpenTxn.java index 8ef5fa167f30..60044977b2fa 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/OpenTxn.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/OpenTxn.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hive.metastore.txn; +package org.apache.hadoop.hive.metastore.txn.entities; import org.apache.hadoop.hive.metastore.api.TxnInfo; import org.apache.hadoop.hive.metastore.api.TxnType; @@ -25,12 +25,6 @@ */ public class OpenTxn { - public static final String OPEN_TXNS_QUERY = "SELECT \"TXN_ID\", \"TXN_STATE\", \"TXN_TYPE\", " - + "(%s - \"TXN_STARTED\") FROM \"TXNS\" ORDER BY \"TXN_ID\""; - public static final String OPEN_TXNS_INFO_QUERY = "SELECT \"TXN_ID\", \"TXN_STATE\", \"TXN_TYPE\", " - + "(%s - \"TXN_STARTED\"), \"TXN_USER\", \"TXN_HOST\", \"TXN_STARTED\", \"TXN_LAST_HEARTBEAT\" " - + "FROM \"TXNS\" ORDER BY \"TXN_ID\""; - private long txnId; private TxnStatus status; private TxnType type; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/OpenTxnList.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/OpenTxnList.java similarity index 89% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/OpenTxnList.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/OpenTxnList.java index 4d7244e4f24f..d102f7efeefe 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/OpenTxnList.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/OpenTxnList.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.txn; +package org.apache.hadoop.hive.metastore.txn.entities; import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse; @@ -27,15 +27,16 @@ import java.util.List; import static java.util.stream.Collectors.toList; -import static org.apache.hadoop.hive.metastore.txn.TxnStatus.ABORTED; -import static org.apache.hadoop.hive.metastore.txn.TxnStatus.OPEN; +import static org.apache.hadoop.hive.metastore.txn.entities.TxnStatus.ABORTED; +import static org.apache.hadoop.hive.metastore.txn.entities.TxnStatus.OPEN; /** * Class for the getOpenTxnList calculation. */ public class OpenTxnList { - private long hwm; - private List openTxnList; + + private final long hwm; + private final List openTxnList; public OpenTxnList(long hwm, List openTxnList) { this.hwm = hwm; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/OperationType.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/OperationType.java similarity index 97% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/OperationType.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/OperationType.java index 39cacd24e3eb..821ba9387522 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/OperationType.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/OperationType.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hive.metastore.txn; +package org.apache.hadoop.hive.metastore.txn.entities; import org.apache.hadoop.hive.metastore.api.DataOperationType; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStatus.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/TxnStatus.java similarity index 97% rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStatus.java rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/TxnStatus.java index a9ad560cc2cb..570c6ab71366 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStatus.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/entities/TxnStatus.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hive.metastore.txn; +package org.apache.hadoop.hive.metastore.txn.entities; import org.apache.hadoop.hive.metastore.api.TxnState; diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/RemoveDuplicateCompleteTxnComponentsCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/RemoveDuplicateCompleteTxnComponentsCommand.java deleted file mode 100644 index 0ff9fa3a4b35..000000000000 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/RemoveDuplicateCompleteTxnComponentsCommand.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hive.metastore.txn.impl; - -import org.apache.hadoop.hive.metastore.DatabaseProduct; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedCommand; -import org.springframework.jdbc.core.namedparam.MapSqlParameterSource; -import org.springframework.jdbc.core.namedparam.SqlParameterSource; - -import java.util.function.Function; - -import static org.apache.hadoop.hive.metastore.DatabaseProduct.DbType.MYSQL; - -public class RemoveDuplicateCompleteTxnComponentsCommand implements ParameterizedCommand { - - private RemoveDuplicateCompleteTxnComponentsCommand() {} - - @Override - public Function resultPolicy() { - return null; - } - - //language=SQL - @Override - public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException { - switch (databaseProduct.dbType) { - case MYSQL: - case SQLSERVER: - return "DELETE \"tc\" " + - "FROM \"COMPLETED_TXN_COMPONENTS\" \"tc\" " + - "INNER JOIN (" + - " SELECT \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\", max(\"CTC_WRITEID\") \"highestWriteId\"" + - " FROM \"COMPLETED_TXN_COMPONENTS\"" + - " GROUP BY \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\") \"c\" " + - "ON \"tc\".\"CTC_DATABASE\" = \"c\".\"CTC_DATABASE\" AND \"tc\".\"CTC_TABLE\" = \"c\".\"CTC_TABLE\"" + - " AND (\"tc\".\"CTC_PARTITION\" = \"c\".\"CTC_PARTITION\" OR (\"tc\".\"CTC_PARTITION\" IS NULL AND \"c\".\"CTC_PARTITION\" IS NULL)) " + - "LEFT JOIN (" + - " SELECT \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\", max(\"CTC_WRITEID\") \"updateWriteId\"" + - " FROM \"COMPLETED_TXN_COMPONENTS\"" + - " WHERE \"CTC_UPDATE_DELETE\" = 'Y'" + - " GROUP BY \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\") \"c2\" " + - "ON \"tc\".\"CTC_DATABASE\" = \"c2\".\"CTC_DATABASE\" AND \"tc\".\"CTC_TABLE\" = \"c2\".\"CTC_TABLE\"" + - " AND (\"tc\".\"CTC_PARTITION\" = \"c2\".\"CTC_PARTITION\" OR (\"tc\".\"CTC_PARTITION\" IS NULL AND \"c2\".\"CTC_PARTITION\" IS NULL)) " + - "WHERE \"tc\".\"CTC_WRITEID\" < \"c\".\"highestWriteId\" " + - (MYSQL == databaseProduct.dbType ? 
- " AND NOT \"tc\".\"CTC_WRITEID\" <=> \"c2\".\"updateWriteId\"" : - " AND (\"tc\".\"CTC_WRITEID\" != \"c2\".\"updateWriteId\" OR \"c2\".\"updateWriteId\" IS NULL)"); - case DERBY: - case ORACLE: - case CUSTOM: - return "DELETE from \"COMPLETED_TXN_COMPONENTS\" \"tc\"" + - "WHERE EXISTS (" + - " SELECT 1" + - " FROM \"COMPLETED_TXN_COMPONENTS\"" + - " WHERE \"CTC_DATABASE\" = \"tc\".\"CTC_DATABASE\"" + - " AND \"CTC_TABLE\" = \"tc\".\"CTC_TABLE\"" + - " AND (\"CTC_PARTITION\" = \"tc\".\"CTC_PARTITION\" OR (\"CTC_PARTITION\" IS NULL AND \"tc\".\"CTC_PARTITION\" IS NULL))" + - " AND (\"tc\".\"CTC_UPDATE_DELETE\"='N' OR \"CTC_UPDATE_DELETE\"='Y')" + - " AND \"tc\".\"CTC_WRITEID\" < \"CTC_WRITEID\")"; - case POSTGRES: - return "DELETE " + - "FROM \"COMPLETED_TXN_COMPONENTS\" \"tc\" " + - "USING (" + - " SELECT \"c1\".*, \"c2\".\"updateWriteId\" FROM" + - " (SELECT \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\", max(\"CTC_WRITEID\") \"highestWriteId\"" + - " FROM \"COMPLETED_TXN_COMPONENTS\"" + - " GROUP BY \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\") \"c1\"" + - " LEFT JOIN" + - " (SELECT \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\", max(\"CTC_WRITEID\") \"updateWriteId\"" + - " FROM \"COMPLETED_TXN_COMPONENTS\"" + - " WHERE \"CTC_UPDATE_DELETE\" = 'Y'" + - " GROUP BY \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\") \"c2\"" + - " ON \"c1\".\"CTC_DATABASE\" = \"c2\".\"CTC_DATABASE\" AND \"c1\".\"CTC_TABLE\" = \"c2\".\"CTC_TABLE\"" + - " AND (\"c1\".\"CTC_PARTITION\" = \"c2\".\"CTC_PARTITION\" OR (\"c1\".\"CTC_PARTITION\" IS NULL AND \"c2\".\"CTC_PARTITION\" IS NULL))" + - ") \"c\" " + - "WHERE \"tc\".\"CTC_DATABASE\" = \"c\".\"CTC_DATABASE\" AND \"tc\".\"CTC_TABLE\" = \"c\".\"CTC_TABLE\"" + - " AND (\"tc\".\"CTC_PARTITION\" = \"c\".\"CTC_PARTITION\" OR (\"tc\".\"CTC_PARTITION\" IS NULL AND \"c\".\"CTC_PARTITION\" IS NULL))" + - " AND \"tc\".\"CTC_WRITEID\" < \"c\".\"highestWriteId\" " + - " AND \"tc\".\"CTC_WRITEID\" IS DISTINCT FROM \"c\".\"updateWriteId\""; - default: - String msg = "Unknown database product: " + databaseProduct.dbType; - throw new MetaException(msg); - } - } - - @Override - public SqlParameterSource getQueryParameters() { - return new MapSqlParameterSource(); - } - - public static RemoveDuplicateCompleteTxnComponentsCommand INSTANCE = new RemoveDuplicateCompleteTxnComponentsCommand(); - -} \ No newline at end of file diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/ConditionalCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/ConditionalCommand.java new file mode 100644 index 000000000000..75781f484623 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/ConditionalCommand.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.txn.jdbc; + +import org.apache.hadoop.hive.metastore.DatabaseProduct; + +/** + * {@link ParameterizedCommand}. {@link ParameterizedBatchCommand}, and {@link InClauseBatchCommand} implementations can also + * implement this interface, marking that the execution is conditial, and the command wants to get notified about + * execution errors. Can be used to implement commands depending on some feature flag(s). + */ +public interface ConditionalCommand { + + /** + * Indicates if the command should be executed or not + * @param databaseProduct + * @return Returns true if the command can be executed, false otherwise. + */ + boolean shouldBeUsed(DatabaseProduct databaseProduct); + + /** + * Called in case of execution error in order to notify this command about the failure + * @param databaseProduct + * @param e The caught Exception + */ + void onError(DatabaseProduct databaseProduct, Exception e); + +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/InClauseBatchCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/InClauseBatchCommand.java new file mode 100644 index 000000000000..dc387e33fb53 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/InClauseBatchCommand.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.txn.jdbc; + +import org.apache.hadoop.hive.metastore.DatabaseProduct; +import org.springframework.jdbc.core.namedparam.SqlParameterSource; + +import java.util.Comparator; + +/** + * Represents a query with an IN() clause. 
The values inside the IN() clause are passed separately + * @param + */ +public class InClauseBatchCommand implements ParameterizedQuery { + + + private final String query; + private final SqlParameterSource queryParameters; + private final String inClauseParameterName; + private final Comparator parmeterLengthComparator; + + + public InClauseBatchCommand(String query, SqlParameterSource queryParameters, + String inClauseParameterName, Comparator parmeterLengthComparator) { + this.query = query; + this.queryParameters = queryParameters; + this.inClauseParameterName = inClauseParameterName; + this.parmeterLengthComparator = parmeterLengthComparator; + } + + /** + * The parameterized query string. The query must have exactly parameter inside the IN clause, and can have zero or + * more parameters everywhere else in the query string. + * @see ParameterizedQuery#getParameterizedQueryString(DatabaseProduct) + */ + public String getParameterizedQueryString(DatabaseProduct databaseProduct) { + return query; + } + + @Override + public SqlParameterSource getQueryParameters() { + return queryParameters; + } + + /** + * @return Returns with the name of the parameter which is inside the IN clause. + */ + public String getInClauseParameterName() { + return inClauseParameterName; + } + + /** + * @return Returns a {@link Comparator} instance which can be used to determine the longest element in the + * list IN clause parameters. This is required to be able to estimate the final legth of the command. + */ + public Comparator getParameterLengthComparator() { + return parmeterLengthComparator; + } + +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/MultiDataSourceJdbcResource.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/MultiDataSourceJdbcResource.java index 16f44f2caa82..7ab42c1336d8 100644 --- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/MultiDataSourceJdbcResource.java +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/MultiDataSourceJdbcResource.java @@ -17,12 +17,19 @@ */ package org.apache.hadoop.hive.metastore.txn.jdbc; +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.metastore.DatabaseProduct; import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.txn.ContextNode; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.tools.SQLGenerator; +import org.apache.hadoop.hive.metastore.utils.StackThreadLocal; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import org.springframework.jdbc.core.ParameterizedPreparedStatementSetter; import org.springframework.jdbc.core.ResultSetExtractor; +import org.springframework.jdbc.core.namedparam.MapSqlParameterSource; import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate; import org.springframework.jdbc.core.namedparam.SqlParameterSource; import org.springframework.jdbc.datasource.DataSourceTransactionManager; @@ -33,9 +40,13 @@ import javax.sql.DataSource; import java.sql.Connection; import java.sql.ResultSet; +import java.util.Arrays; +import java.util.Collection; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.function.Function; +import java.util.regex.Pattern; /** * 
Holds multiple {@link DataSource}s as a single object and offers JDBC related resources. @@ -46,19 +57,23 @@ public class MultiDataSourceJdbcResource { private static final Logger LOG = LoggerFactory.getLogger(MultiDataSourceJdbcResource.class); - private final ThreadLocal> threadLocal = new ThreadLocal<>(); + private final StackThreadLocal threadLocal = new StackThreadLocal<>(); private final Map dataSources = new HashMap<>(); private final Map transactionManagers = new HashMap<>(); private final Map jdbcTemplates = new HashMap<>(); private final DatabaseProduct databaseProduct; + private final Configuration conf; + private final SQLGenerator sqlGenerator; /** * Creates a new instance of the {@link MultiDataSourceJdbcResource} class * @param databaseProduct A {@link DatabaseProduct} instance representing the type of the underlying HMS dabatabe. */ - public MultiDataSourceJdbcResource(DatabaseProduct databaseProduct) { + public MultiDataSourceJdbcResource(DatabaseProduct databaseProduct, Configuration conf, SQLGenerator sqlGenerator) { this.databaseProduct = databaseProduct; + this.conf = conf; + this.sqlGenerator = sqlGenerator; } /** @@ -81,11 +96,11 @@ public void registerDataSource(String dataSourceName, DataSource dataSource) { * @param dataSourceName The name of the {@link DataSource} bind to the current {@link Thread}. */ public void bindDataSource(String dataSourceName) { - threadLocal.set(new ContextNode<>(threadLocal.get(), dataSourceName)); + threadLocal.set(dataSourceName); } public void bindDataSource(Transactional transactional) { - threadLocal.set(new ContextNode<>(threadLocal.get(), transactional.value())); + threadLocal.set(transactional.value()); } /** @@ -96,12 +111,18 @@ public void bindDataSource(Transactional transactional) { * {@link DataSource} the JDBC resources should be returned. */ public void unbindDataSource() { - ContextNode node = threadLocal.get(); - if (node != null && node.getParent() != null) { - threadLocal.set(node.getParent()); - } else { - threadLocal.remove(); - } + threadLocal.unset(); + } + + /** + * @return Returns the {@link Configuration} object used to create this {@link MultiDataSourceJdbcResource} instance. 
+ */ + public Configuration getConf() { + return conf; + } + + public SQLGenerator getSqlGenerator() { + return sqlGenerator; } /** @@ -141,15 +162,6 @@ public DatabaseProduct getDatabaseProduct() { return databaseProduct; } - private String getDataSourceName() { - ContextNode node = threadLocal.get(); - if (node == null) { - throw new IllegalStateException("In order to access the JDBC resources, first you need to obtain a transaction " + - "using getTransaction(int propagation, String dataSourceName)!"); - } - return node.getValue(); - } - /** * Executes a {@link NamedParameterJdbcTemplate#update(String, org.springframework.jdbc.core.namedparam.SqlParameterSource)} * calls using the query string and parameters obtained from {@link ParameterizedCommand#getParameterizedQueryString(DatabaseProduct)} and @@ -161,9 +173,110 @@ private String getDataSourceName() { * @throws MetaException Forwarded from {@link ParameterizedCommand#getParameterizedQueryString(DatabaseProduct)} or * thrown if the update count was rejected by the {@link ParameterizedCommand#resultPolicy()} method */ - public Integer execute(ParameterizedCommand command) throws MetaException { - return execute(command.getParameterizedQueryString(getDatabaseProduct()), - command.getQueryParameters(), command.resultPolicy()); + public int execute(ParameterizedCommand command) throws MetaException { + if (!shouldExecute(command)) { + return -1; + } + try { + return execute(command.getParameterizedQueryString(getDatabaseProduct()), + command.getQueryParameters(), command.resultPolicy()); + } catch (Exception e) { + handleError(command, e); + throw e; + } + } + + /** + * Executes a {@link org.springframework.jdbc.core.JdbcTemplate#batchUpdate(String, Collection, int, ParameterizedPreparedStatementSetter)} + * call using the query string obtained from {@link ParameterizedBatchCommand#getParameterizedQueryString(DatabaseProduct)}, + * the parameters obtained from {@link ParameterizedBatchCommand#getQueryParameters()}, and the + * {@link org.springframework.jdbc.core.PreparedStatementSetter} obtained from + * {@link ParameterizedBatchCommand#getPreparedStatementSetter()} methods. The batchSize is coming from the + * {@link Configuration} object. + * + * @param command The {@link ParameterizedBatchCommand} to execute. + */ + public int[][] execute(ParameterizedBatchCommand command) throws MetaException { + if (!shouldExecute(command)) { + return null; + } + try { + int maxBatchSize = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.JDBC_MAX_BATCH_SIZE); + return getJdbcTemplate().getJdbcTemplate().batchUpdate( + command.getParameterizedQueryString(databaseProduct), + command.getQueryParameters(), + maxBatchSize, + command.getPreparedStatementSetter() + ); + } catch (Exception e) { + handleError(command, e); + throw e; + } + } + + /** + * Executes the passed {@link InClauseBatchCommand}. It estimates the length of the query and if it exceeds the limit + * set in {@link org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars#DIRECT_SQL_MAX_QUERY_LENGTH}, or the + * number of elements in the IN() clause exceeds + * {@link org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars#DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE}, the query + * will be split to multiple queries. + * @param command The {@link InClauseBatchCommand} to execute + * @return Returns with the number of affected rows in total. 
+ * @param The type of the elements in the IN() clause + * @throws MetaException If {@link InClauseBatchCommand#getInClauseParameterName()} is blank, or the value of the + * IN() clause parameter in {@link InClauseBatchCommand#getQueryParameters()} is not exist or not an instance of List<T> + */ + public int execute(InClauseBatchCommand command) throws MetaException { + if (!shouldExecute(command)) { + return -1; + } + + List elements; + try { + if (StringUtils.isBlank(command.getInClauseParameterName())) { + throw new MetaException("The IN() clause parameter name (InClauseBatchCommand.getInClauseParameterName() " + + "cannot be blank!"); + } + try { + //noinspection unchecked + elements = (List) command.getQueryParameters().getValue(command.getInClauseParameterName()); + } catch (ClassCastException e) { + throw new MetaException("The parameter " + command.getInClauseParameterName() + "must be of type List!"); + } + MapSqlParameterSource params = (MapSqlParameterSource) command.getQueryParameters(); + String query = command.getParameterizedQueryString(databaseProduct); + if (CollectionUtils.isEmpty(elements)) { + throw new IllegalArgumentException("The elements list cannot be null or empty! An empty IN clause is invalid!"); + } + if (!Pattern.compile("IN\\s*\\(\\s*:" + command.getInClauseParameterName() + "\\s*\\)", Pattern.CASE_INSENSITIVE).matcher(query).find()) { + throw new IllegalArgumentException("The query must contain the IN(:" + command.getInClauseParameterName() + ") clause!"); + } + + int maxQueryLength = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH) * 1024; + int batchSize = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE); + // The length of a single element is the string length of the longest element + 2 characters (comma, space) + int elementLength = elements.isEmpty() ? 1 : elements + .stream() + .max(command.getParameterLengthComparator()) + .orElseThrow(IllegalStateException::new).toString().length() + 2; + // estimated base query size: query size + the length of all parameters. + int baseQueryLength = query.length(); + int maxElementsByLength = (maxQueryLength - baseQueryLength) / elementLength; + + int inClauseMaxSize = Math.min(batchSize, maxElementsByLength); + + int fromIndex = 0, totalCount = 0; + while (fromIndex < elements.size()) { + int endIndex = Math.min(elements.size(), fromIndex + inClauseMaxSize); + params.addValue(command.getInClauseParameterName(), elements.subList(fromIndex, endIndex)); + totalCount += getJdbcTemplate().update(query, params); + fromIndex = endIndex; + } + return totalCount; + } catch (Exception e) { + handleError(command, e); + throw e; + } } /** @@ -178,7 +291,7 @@ public Integer execute(ParameterizedCommand command) throws MetaException { * @throws MetaException Forwarded from {@link ParameterizedCommand#getParameterizedQueryString(DatabaseProduct)} or * thrown if the update count was rejected by the {@link ParameterizedCommand#resultPolicy()} method */ - public Integer execute(String query, SqlParameterSource params, + public int execute(String query, SqlParameterSource params, Function resultPolicy) throws MetaException { LOG.debug("Going to execute command <{}>", query); int count = getJdbcTemplate().update(query, params); @@ -200,7 +313,7 @@ public Integer execute(String query, SqlParameterSource params, * @return Returns with the object(s) constructed from the result of the executed query. 
* @throws MetaException Forwarded from {@link ParameterizedCommand#getParameterizedQueryString(DatabaseProduct)}. */ - public Result execute(QueryHandler queryHandler) throws MetaException { + public T execute(QueryHandler queryHandler) throws MetaException { String queryStr = queryHandler.getParameterizedQueryString(getDatabaseProduct()); LOG.debug("Going to execute query <{}>", queryStr); SqlParameterSource params = queryHandler.getQueryParameters(); @@ -211,4 +324,18 @@ public Result execute(QueryHandler queryHandler) throws MetaExc } } + private String getDataSourceName() { + return threadLocal.get(); + } + + private boolean shouldExecute(Object command) { + return !(command instanceof ConditionalCommand) || ((ConditionalCommand)command).shouldBeUsed(databaseProduct); + } + + private void handleError(Object command, Exception e) { + if (command instanceof ConditionalCommand) { + ((ConditionalCommand)command).onError(databaseProduct, e); + } + } + } diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/NoPoolConnectionPool.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/NoPoolConnectionPool.java new file mode 100644 index 000000000000..9b869be0f442 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/NoPoolConnectionPool.java @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.txn.jdbc; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.DatabaseProduct; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.sql.DataSource; +import java.io.PrintWriter; +import java.sql.Connection; +import java.sql.Driver; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Properties; + +public class NoPoolConnectionPool implements DataSource { + + private static final Logger LOG = LoggerFactory.getLogger(NoPoolConnectionPool.class); + + // Note that this depends on the fact that no-one in this class calls anything but + // getConnection. If you want to use any of the Logger or wrap calls you'll have to + // implement them. 
+ private final Configuration conf; + private final DatabaseProduct dbProduct; + private Driver driver; + private String connString; + private String user; + private String passwd; + + public NoPoolConnectionPool(Configuration conf, DatabaseProduct dbProduct) { + this.conf = conf; + this.dbProduct = dbProduct; + } + + @Override + public Connection getConnection() throws SQLException { + if (user == null) { + user = DataSourceProvider.getMetastoreJdbcUser(conf); + passwd = DataSourceProvider.getMetastoreJdbcPasswd(conf); + } + return getConnection(user, passwd); + } + + @Override + public Connection getConnection(String username, String password) throws SQLException { + // Find the JDBC driver + if (driver == null) { + String driverName = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECTION_DRIVER); + if (driverName == null || driverName.equals("")) { + String msg = "JDBC driver for transaction db not set in configuration " + + "file, need to set " + MetastoreConf.ConfVars.CONNECTION_DRIVER.getVarname(); + LOG.error(msg); + throw new RuntimeException(msg); + } + try { + LOG.info("Going to load JDBC driver {}", driverName); + driver = (Driver) Class.forName(driverName).newInstance(); + } catch (InstantiationException e) { + throw new RuntimeException("Unable to instantiate driver " + driverName + ", " + + e.getMessage(), e); + } catch (IllegalAccessException e) { + throw new RuntimeException( + "Unable to access driver " + driverName + ", " + e.getMessage(), + e); + } catch (ClassNotFoundException e) { + throw new RuntimeException("Unable to find driver " + driverName + ", " + e.getMessage(), + e); + } + connString = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY); + } + + try { + LOG.info("Connecting to transaction db with connection string {}", connString); + Properties connectionProps = new Properties(); + connectionProps.setProperty("user", username); + connectionProps.setProperty("password", password); + Connection conn = driver.connect(connString, connectionProps); + String prepareStmt = dbProduct != null ? 
dbProduct.getPrepareTxnStmt() : null; + if (prepareStmt != null) { + try (Statement stmt = conn.createStatement()) { + stmt.execute(prepareStmt); + } + } + conn.setAutoCommit(false); + return conn; + } catch (SQLException e) { + throw new RuntimeException("Unable to connect to transaction manager using " + connString + + ", " + e.getMessage(), e); + } + } + + @Override + public PrintWriter getLogWriter() { + throw new UnsupportedOperationException(); + } + + @Override + public void setLogWriter(PrintWriter out) { + throw new UnsupportedOperationException(); + } + + @Override + public void setLoginTimeout(int seconds) { + throw new UnsupportedOperationException(); + } + + @Override + public int getLoginTimeout() { + throw new UnsupportedOperationException(); + } + + @Override + public java.util.logging.Logger getParentLogger() { + throw new UnsupportedOperationException(); + } + + @Override + public T unwrap(Class iface) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isWrapperFor(Class iface) { + throw new UnsupportedOperationException(); + } +} diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/ParameterizedBatchCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/ParameterizedBatchCommand.java new file mode 100644 index 000000000000..9f2647cb9f79 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/ParameterizedBatchCommand.java @@ -0,0 +1,61 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.txn.jdbc; + +import org.apache.hadoop.hive.metastore.DatabaseProduct; +import org.springframework.jdbc.core.ParameterizedPreparedStatementSetter; + +import java.util.List; +import java.util.function.Function; + +/** + * Represents a parameterized batch command (for exmaple an UPDATE statement) as a Spring + * {@link org.springframework.jdbc.core.JdbcTemplate} style parameterized query string + * (for example: UPDATE TBL SET COL1 = ? WHERE ID = ?), its parameters, and a result policy. The result policy + * is a Function<Integer, Boolean> function which must decide if the number of + * affected rows is acceptable or not. It is called for each result in the batch. + *
+ * <p>
+ * Please note that named parameters cannot be used in batch commands
+ * (like UPDATE TBL SET COL1 = :value WHERE ID = :id).
+ */
+public interface ParameterizedBatchCommand<T> {
+
+  /**
+   * The parameterized query string. The query is allowed to have no parameters at all.
+   * @param databaseProduct A {@link DatabaseProduct} instance representing the type of the underlying HMS database.
+   * @return Returns the parameterized query string.
+   */
+  String getParameterizedQueryString(DatabaseProduct databaseProduct);
+
+  /**
+   * A {@link List} instance containing the required parameters for the query string.
+   */
+  List<T> getQueryParameters();
+
+  /**
+   * Implementations must return a {@link ParameterizedPreparedStatementSetter} instance which will be
+   * responsible for setting the parameter values for all the items in the batch.
+   */
+  ParameterizedPreparedStatementSetter<T> getPreparedStatementSetter();
+
+  /**
+   * @return Returns the result policy to be used to validate the number of affected rows.
+   */
+  Function<Integer, Boolean> resultPolicy();
+
+}
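
To make the contract above concrete, here is a minimal sketch of an implementation. The class, table and column names are made up for illustration only; the interface and ParameterizedCommand.EXACTLY_ONE_ROW come from this patch, and the concrete commands added further down (such as AddWriteIdsToMinHistoryCommand) follow the same pattern.

// Hypothetical sketch of a ParameterizedBatchCommand implementation; table, columns and
// class name are illustrative, not part of the patch.
import org.apache.hadoop.hive.metastore.DatabaseProduct;
import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedBatchCommand;
import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedCommand;
import org.springframework.jdbc.core.ParameterizedPreparedStatementSetter;

import java.util.List;
import java.util.function.Function;
import java.util.stream.Collectors;

public class MarkEntriesProcessedCommand implements ParameterizedBatchCommand<Object[]> {

  private final List<Object[]> params;

  public MarkEntriesProcessedCommand(List<Long> ids) {
    // One Object[] per batch item; only positional (?) parameters are supported in batch commands.
    this.params = ids.stream().map(id -> new Object[]{ id }).collect(Collectors.toList());
  }

  @Override
  public String getParameterizedQueryString(DatabaseProduct databaseProduct) {
    return "UPDATE \"SOME_TABLE\" SET \"STATE\" = 'p' WHERE \"ID\" = ?";
  }

  @Override
  public List<Object[]> getQueryParameters() {
    return params;
  }

  @Override
  public ParameterizedPreparedStatementSetter<Object[]> getPreparedStatementSetter() {
    // Binds the single positional parameter for each item in the batch.
    return (ps, argument) -> ps.setLong(1, (Long) argument[0]);
  }

  @Override
  public Function<Integer, Boolean> resultPolicy() {
    // Every statement in the batch is expected to touch exactly one row.
    return ParameterizedCommand.EXACTLY_ONE_ROW;
  }
}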
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/RollbackException.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/RollbackException.java
new file mode 100644
index 000000000000..94474852878c
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/RollbackException.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc;
+
+import java.lang.reflect.Method;
+
+/**
+ * This exception can be used to trigger rollback in 
+ * {@link org.apache.hadoop.hive.metastore.txn.TransactionalRetryProxy#invoke(Object, Method, Object[])}
+ * for the current transaction, without propagating the exception to the caller. The proxy will catch this exception,
+ * roll back the transaction (if not already completed) and return the value supplied in the constructor to the caller.
+ */
+public class RollbackException extends RuntimeException {  
+  
+  private final Object result;
+  
+  public RollbackException(Object result) {
+    this.result = result;
+  }
+
+  public Object getResult() {
+    return result;
+  }
+  
+}
\ No newline at end of file
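
A usage sketch, assuming the function is invoked through the TransactionalRetryProxy mentioned above; the flag and result values are made up for illustration:

// Hypothetical sketch: abort the current transaction but hand a ready-made result back
// to the caller instead of propagating an error.
TransactionalFunction<String> cleanup = jdbcResource -> {
  boolean nothingToDo = true; // hypothetical condition, would normally be derived via jdbcResource
  if (nothingToDo) {
    // The proxy catches RollbackException, rolls the transaction back and returns "skipped".
    throw new RollbackException("skipped");
  }
  // ... otherwise do the real work through jdbcResource ...
  return "done";
};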
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/TransactionContext.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/TransactionContext.java
index ec5e32643aa2..c5350ffc1711 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/TransactionContext.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/TransactionContext.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hive.metastore.txn.jdbc;
 
+import org.springframework.lang.NonNull;
+import org.springframework.transaction.TransactionException;
 import org.springframework.transaction.TransactionStatus;
 
 /**
@@ -26,7 +28,7 @@
  * In other words: This wrapper automatically rolls back uncommitted transactions, but the commit
  * needs to be done manually using {@link TransactionContextManager#commit(TransactionContext)} method.
  */
-public class TransactionContext implements AutoCloseable {
+public class TransactionContext implements TransactionStatus, AutoCloseable {
 
   private final TransactionStatus transactionStatus;
   private final TransactionContextManager transactionManager;
@@ -36,15 +38,61 @@ public class TransactionContext implements AutoCloseable {
     this.transactionManager = transactionManager;
   }
 
+  @Override
+  public boolean hasSavepoint() {
+    return transactionStatus.hasSavepoint();
+  }
+
+  @Override
+  public void flush() {
+    transactionStatus.flush();
+  }
+
+  @NonNull
+  @Override
+  public Object createSavepoint() throws TransactionException {
+    return transactionStatus.createSavepoint();
+  }
+
+  @Override
+  public void rollbackToSavepoint(@NonNull Object savepoint) throws TransactionException {
+    transactionStatus.rollbackToSavepoint(savepoint);
+  }
+
+  @Override
+  public void releaseSavepoint(@NonNull Object savepoint) throws TransactionException {
+    transactionStatus.releaseSavepoint(savepoint);
+  }
+
+  @Override
+  public boolean isNewTransaction() {
+    return transactionStatus.isNewTransaction();
+  }
+
+  @Override
+  public void setRollbackOnly() {
+    transactionStatus.setRollbackOnly();
+  }
+
+  @Override
+  public boolean isRollbackOnly() {
+    return transactionStatus.isRollbackOnly();
+  }
+
+  @Override
+  public boolean isCompleted() {
+    return transactionStatus.isCompleted();
+  }
+
   /**
    * @return Returns the {@link TransactionStatus} instance wrapped by this object.
    */
-  public TransactionStatus getTransactionStatus() {
+  TransactionStatus getTransactionStatus() {
     return transactionStatus;
   }
 
   /**
-   * @see TransactionContext TransactionWrapper class level javadoc.
+   * @see TransactionContext class level javadoc.
    */
   @Override
   public void close() {
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/TransactionContextManager.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/TransactionContextManager.java
index f91e57e0e604..5be42248fe73 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/TransactionContextManager.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/TransactionContextManager.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hive.metastore.txn.jdbc;
 
+import org.apache.hadoop.hive.metastore.utils.StackThreadLocal;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.springframework.transaction.PlatformTransactionManager;
 import org.springframework.transaction.TransactionStatus;
 import org.springframework.transaction.support.DefaultTransactionDefinition;
@@ -29,8 +32,19 @@
  */
 public class TransactionContextManager {
 
+  private static final Logger LOG = LoggerFactory.getLogger(TransactionContextManager.class);
+  
   private final PlatformTransactionManager realTransactionManager;
 
+  /**
+   * We must keep track of the requested transactions to be able to return the current transaction in {@link #getActiveTransaction()}.
+   * In Spring's JdbcTemplate, users have to choose programmatic transaction management to access the {@link TransactionStatus}
+   * object which can be used for savepoint management. With this enhancement, it is possible to identify and return
+   * the active transaction, which allows combining the use of {@link org.springframework.transaction.annotation.Transactional} 
+   * annotation with programmatic savepoint management.
+   */
+  private final StackThreadLocal<TransactionContext> contexts = new StackThreadLocal<>();
+
   TransactionContextManager(PlatformTransactionManager realTransactionManager) {
     this.realTransactionManager = realTransactionManager;
   }
@@ -38,26 +52,50 @@ public class TransactionContextManager {
   /**
    * Begins a new transaction or returns an existing, depending on the passed Transaction Propagation.
    * The created transaction is wrapped into a {@link TransactionContext} which is {@link AutoCloseable} and allows using
-   * the wrapper inside a try-with-resources block. 
+   * the wrapper inside a try-with-resources block.
+   *
    * @param propagation The transaction propagation to use.
    */
-  public TransactionContext getTransaction(int propagation) {
-      return new TransactionContext(realTransactionManager.getTransaction(new DefaultTransactionDefinition(propagation)), this);
+  public TransactionContext getNewTransaction(int propagation) {
+    TransactionContext context = new TransactionContext(realTransactionManager.getTransaction(
+        new DefaultTransactionDefinition(propagation)), this);
+    contexts.set(context);
+    return context;
   }
-
+  
+  public TransactionContext getActiveTransaction() {
+    return contexts.get();
+  }
+  
   public void commit(TransactionContext context) {
-      realTransactionManager.commit(context.getTransactionStatus());
+    TransactionContext storedContext = contexts.get();
+    if (!storedContext.equals(context)) {
+      throw new IllegalStateException();
+    }
+    try {
+      realTransactionManager.commit(context.getTransactionStatus());      
+    } finally {
+      contexts.unset();
+    }
   }
 
   public void rollback(TransactionContext context) {
+    TransactionContext storedContext = contexts.get();
+    if (!storedContext.equals(context)) {
+      throw new IllegalStateException();
+    }
+    try {
       realTransactionManager.rollback(context.getTransactionStatus());
+    } finally {
+      contexts.unset();
+    }
   }
 
   void rollbackIfNotCommitted(TransactionContext context) {
-    TransactionStatus status = context.getTransactionStatus();
-      if (!status.isCompleted()) {
-        realTransactionManager.rollback(status);
-      }
+    if (!context.isCompleted()) {
+      LOG.debug("The transaction is not committed and we are leaving the try-with-resources block. Going to rollback: {}", context);
+      rollback(context);
+    }
   }
 
-}
+}
\ No newline at end of file
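
The intended usage pattern, sketched under the assumption that a TransactionContextManager instance (here transactionManager) is already at hand; the propagation constant comes from Spring's TransactionDefinition:

// Sketch only: the context rolls itself back in close() unless it was committed explicitly.
try (TransactionContext context = transactionManager.getNewTransaction(TransactionDefinition.PROPAGATION_REQUIRED)) {
  // ... JDBC work ...
  Object savepoint = context.createSavepoint();   // possible because TransactionContext now implements TransactionStatus
  // ... more work; on a recoverable error: context.rollbackToSavepoint(savepoint); ...
  transactionManager.commit(context);
}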
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/TransactionalFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/TransactionalFunction.java
index 7917039245c1..5f38ee961ce5 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/TransactionalFunction.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/TransactionalFunction.java
@@ -18,6 +18,9 @@
 package org.apache.hadoop.hive.metastore.txn.jdbc;
 
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
 
 /**
  * A functional interface representing a function call (typically a query or statement) which has a result and done within
@@ -26,7 +29,7 @@
  */
 @FunctionalInterface
public interface TransactionalFunction<Result> {
-
+  
   /**
   * Implementations should typically execute transactional database calls inside.
    * @param jdbcResource A {@link MultiDataSourceJdbcResource} instance responsible for providing all the necessary resources 
@@ -35,6 +38,7 @@ public interface TransactionalFunction {
    * @throws org.springframework.dao.DataAccessException Thrown if any of the JDBC calls fail
    * @throws MetaException Thrown in case of application error within the function
    */
-  Result execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException;
+  Result execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException, NoSuchTxnException, TxnAbortedException, 
+      NoSuchLockException;
 
 }
\ No newline at end of file
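
A brief sketch of what the widened throws clause enables; the transaction id and the aborted-state check below are hypothetical:

// Implementations can now surface transaction-level errors directly instead of wrapping
// them in MetaException; txnId and the check are placeholders for illustration.
TransactionalFunction<Void> heartbeat = jdbcResource -> {
  long txnId = 42L;
  boolean txnIsAborted = false; // would normally be read via jdbcResource
  if (txnIsAborted) {
    throw new TxnAbortedException("Transaction " + txnId + " has already been aborted");
  }
  // ... update the heartbeat timestamp for txnId ...
  return null;
};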
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/AddWriteIdsToMinHistoryCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/AddWriteIdsToMinHistoryCommand.java
new file mode 100644
index 000000000000..dfe87bd34085
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/AddWriteIdsToMinHistoryCommand.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.commands;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.txn.TxnHandler;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.ConditionalCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedBatchCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedCommand;
+import org.springframework.jdbc.core.ParameterizedPreparedStatementSetter;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+
+public class AddWriteIdsToMinHistoryCommand implements ParameterizedBatchCommand<Object[]>, ConditionalCommand {
+
+  private static final String MIN_HISTORY_WRITE_ID_INSERT_QUERY = "INSERT INTO \"MIN_HISTORY_WRITE_ID\" (\"MH_TXNID\", " +
+      "\"MH_DATABASE\", \"MH_TABLE\", \"MH_WRITEID\") VALUES (?, ?, ?, ?)";
+  
+  private final List<Object[]> params;
+
+  public AddWriteIdsToMinHistoryCommand(long txnId, Map<String, Long> minOpenWriteIds) {
+    this.params = new ArrayList<>();
+    for (Map.Entry<String, Long> validWriteId : minOpenWriteIds.entrySet()) {
+      String[] names = TxnUtils.getDbTableName(validWriteId.getKey());
+      params.add(new Object[]{ txnId, names[0], names[1], validWriteId.getValue() });
+    }
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) {
+    return MIN_HISTORY_WRITE_ID_INSERT_QUERY;
+  }
+
+  @Override
+  public List<Object[]> getQueryParameters() {
+    return params;
+  }
+
+  @Override
+  public ParameterizedPreparedStatementSetter<Object[]> getPreparedStatementSetter() {
+    return (ps, argument) -> {
+      ps.setLong(1, (Long)argument[0]);
+      ps.setString(2, argument[1].toString());
+      ps.setString(3, argument[2].toString());
+      ps.setLong(4, (Long)argument[3]);
+    };
+  }
+
+  @Override
+  public Function<Integer, Boolean> resultPolicy() {
+    return ParameterizedCommand.EXACTLY_ONE_ROW;
+  }
+
+  @Override
+  public boolean shouldBeUsed(DatabaseProduct databaseProduct) {
+    return TxnHandler.ConfVars.useMinHistoryWriteId();
+  }
+
+  @Override
+  public void onError(DatabaseProduct databaseProduct, Exception e) {
+    if (databaseProduct.isTableNotExistsError(e)) {
+      // If the table does not exist anymore, we disable the flag and start working the new way.
+      // This allows switching to the new functionality without a restart.
+      TxnHandler.ConfVars.setUseMinHistoryWriteId(false);
+    }
+  }
+  
+}
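
A possible call site, with illustrative values, assuming an already-configured MultiDataSourceJdbcResource (jdbcResource) and an open transaction id (txnId):

// Illustrative values only. Because the command implements ConditionalCommand,
// MultiDataSourceJdbcResource.execute() skips it when the min-history-write-id flag is off,
// and onError() turns the flag off if the backing table turns out to be missing.
Map<String, Long> minOpenWriteIds = new HashMap<>();
minOpenWriteIds.put("default.acid_tbl", 17L);   // "db.table" -> minimum open write id
int[][] updateCounts = jdbcResource.execute(new AddWriteIdsToMinHistoryCommand(txnId, minOpenWriteIds));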
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/AddWriteIdsToTxnToWriteIdCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/AddWriteIdsToTxnToWriteIdCommand.java
new file mode 100644
index 000000000000..eaa6d1e5d9be
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/AddWriteIdsToTxnToWriteIdCommand.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.commands;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.TxnToWriteId;
+import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedBatchCommand;
+import org.springframework.jdbc.core.ParameterizedPreparedStatementSetter;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Function;
+
+public class AddWriteIdsToTxnToWriteIdCommand implements ParameterizedBatchCommand<Object[]> {
+
+  private final List<Object[]> params;
+
+  public AddWriteIdsToTxnToWriteIdCommand(String dbName, String tableName, long writeId, List<Long> txnIds, List<TxnToWriteId> txnToWriteIds) {
+    this.params = new ArrayList<>();
+    for (long txnId : txnIds) {
+      params.add(new Object[]{ txnId, dbName, tableName, writeId });
+      txnToWriteIds.add(new TxnToWriteId(txnId, writeId));
+      writeId++;
+    }
+  }
+
+  
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) {
+    return "INSERT INTO \"TXN_TO_WRITE_ID\" (\"T2W_TXNID\",  \"T2W_DATABASE\", \"T2W_TABLE\", \"T2W_WRITEID\") VALUES (?, ?, ?, ?)";
+  }
+
+  @Override
+  public List<Object[]> getQueryParameters() {
+    return params;
+  }
+
+  @Override
+  public ParameterizedPreparedStatementSetter<Object[]> getPreparedStatementSetter() {
+    return (ps, argument) -> {
+      ps.setLong(1, (Long)argument[0]);
+      ps.setString(2, argument[1].toString());
+      ps.setString(3, argument[2].toString());
+      ps.setLong(4, (Long)argument[3]);
+    };
+  }
+
+  @Override
+  public Function<Integer, Boolean> resultPolicy() {
+    return null;
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/DeleteInvalidOpenTxnsCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/DeleteInvalidOpenTxnsCommand.java
new file mode 100644
index 000000000000..3a218ca9e51d
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/DeleteInvalidOpenTxnsCommand.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.commands;
+
+import org.apache.hadoop.hive.metastore.txn.jdbc.InClauseBatchCommand;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import java.util.List;
+
+public class DeleteInvalidOpenTxnsCommand extends InClauseBatchCommand<Long> {
+
+  public DeleteInvalidOpenTxnsCommand(List<Long> txnids) {
+    super("DELETE FROM \"TXNS\" WHERE \"TXN_ID\" IN (:txnIds)", 
+        new MapSqlParameterSource().addValue("txnIds", txnids),
+        "txnIds", Long::compareTo);
+  }
+
+}
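InClauseBatchCommand exists so that a large transaction-id list does not end up in one oversized IN (:txnIds) clause. A rough sketch of the assumed chunking behaviour (the real splitting and parameter-limit handling lives in the metastore's JDBC layer):

import java.util.ArrayList;
import java.util.List;

public class InClauseChunkSketch {
  // Split a large ID list into slices so that no single "IN (...)" clause
  // exceeds the configured parameter limit (assumed behaviour, for illustration).
  static List<List<Long>> chunk(List<Long> ids, int maxElementsPerInClause) {
    List<List<Long>> batches = new ArrayList<>();
    for (int i = 0; i < ids.size(); i += maxElementsPerInClause) {
      batches.add(ids.subList(i, Math.min(ids.size(), i + maxElementsPerInClause)));
    }
    return batches;
  }
}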
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/DeleteReplTxnMapEntryCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/DeleteReplTxnMapEntryCommand.java
new file mode 100644
index 000000000000..556a509b1f05
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/DeleteReplTxnMapEntryCommand.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.commands;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedCommand;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.util.function.Function;
+
+public class DeleteReplTxnMapEntryCommand implements ParameterizedCommand {
+    
+  private final long sourceTxnId;
+  private final String replicationPolicy;
+
+  public DeleteReplTxnMapEntryCommand(long sourceTxnId, String replicationPolicy) {
+    this.sourceTxnId = sourceTxnId;
+    this.replicationPolicy = replicationPolicy;
+  }
+
+  @Override
+  public Function<Integer, Boolean> resultPolicy() {
+    return null;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return "DELETE FROM \"REPL_TXN_MAP\" WHERE \"RTM_SRC_TXN_ID\" = :sourceTxnId AND \"RTM_REPL_POLICY\" = :replPolicy";
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource()
+        .addValue("sourceTxnId", sourceTxnId)
+        .addValue("replPolicy", replicationPolicy);
+  }
+}
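Single-row commands like this one pair a named-parameter SQL string with a MapSqlParameterSource. A minimal sketch of how such a pair is typically executed, assuming a plain DataSource and NamedParameterJdbcTemplate rather than Hive's own execution path:

import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;

import javax.sql.DataSource;

public class NamedParamDeleteSketch {
  // Binds :sourceTxnId and :replPolicy and returns the number of deleted rows.
  static int deleteMapping(DataSource ds, long sourceTxnId, String replPolicy) {
    NamedParameterJdbcTemplate jdbc = new NamedParameterJdbcTemplate(ds);
    return jdbc.update(
        "DELETE FROM \"REPL_TXN_MAP\" WHERE \"RTM_SRC_TXN_ID\" = :sourceTxnId AND \"RTM_REPL_POLICY\" = :replPolicy",
        new MapSqlParameterSource()
            .addValue("sourceTxnId", sourceTxnId)
            .addValue("replPolicy", replPolicy));
  }
}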
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/InsertCompactionInfoCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompactionInfoCommand.java
similarity index 96%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/InsertCompactionInfoCommand.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompactionInfoCommand.java
index da0e2126a3f7..4af24c5d4a50 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/InsertCompactionInfoCommand.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompactionInfoCommand.java
@@ -15,11 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.commands;
 
 import org.apache.hadoop.hive.metastore.DatabaseProduct;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
 import org.apache.hadoop.hive.metastore.txn.MetaWrapperException;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedCommand;
@@ -34,7 +34,7 @@ public class InsertCompactionInfoCommand implements ParameterizedCommand {
 
   private final long compactionEndTime;
 
-  // language=PostgreSQL
+  //language=SQL
   private static final String INSERT =
       "INSERT INTO \"COMPLETED_COMPACTIONS\" " +
           "   (\"CC_ID\", \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_STATE\", \"CC_TYPE\", " +
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompactionRequestCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompactionRequestCommand.java
new file mode 100644
index 000000000000..7e2d9bf52b5c
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompactionRequestCommand.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.commands;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.CompactionRequest;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionState;
+import org.apache.hadoop.hive.metastore.txn.MetaWrapperException;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedCommand;
+import org.apache.hadoop.hive.metastore.utils.StringableMap;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.Types;
+import java.util.function.Function;
+
+import static org.apache.hadoop.hive.metastore.txn.TxnUtils.getEpochFn;
+
+public class InsertCompactionRequestCommand implements ParameterizedCommand {
+  
+  private final long id;
+  private final CompactionState compactionState;
+  private final CompactionRequest rqst;
+  private Long highestWriteId = null;
+  private Long txnId = null;
+
+  public InsertCompactionRequestCommand(long id, CompactionState compactionState, CompactionRequest rqst) {
+    this.id = id;
+    this.compactionState = compactionState;
+    this.rqst = rqst;
+  }
+  
+  public InsertCompactionRequestCommand withTxnDetails(long highestWriteId, long txnId) {
+    this.highestWriteId = highestWriteId;
+    this.txnId = txnId;
+    return this;
+  }
+
+  @Override
+  public Function<Integer, Boolean> resultPolicy() {
+    return ParameterizedCommand.EXACTLY_ONE_ROW;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return "INSERT INTO \"COMPACTION_QUEUE\" (\"CQ_ID\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", \"CQ_STATE\", " +
+        "\"CQ_TYPE\", \"CQ_POOL_NAME\", \"CQ_NUMBER_OF_BUCKETS\", \"CQ_ORDER_BY\", \"CQ_TBLPROPERTIES\", \"CQ_RUN_AS\", " +
+        "\"CQ_INITIATOR_ID\", \"CQ_INITIATOR_VERSION\", \"CQ_HIGHEST_WRITE_ID\", \"CQ_TXN_ID\", \"CQ_ENQUEUE_TIME\") " +
+        "VALUES(:id, :dbName, :tableName, :partition, :state, :type, :poolName, :buckets, :orderBy, :tblProperties, " +
+        ":runAs, :initiatorId, :initiatorVersion, :highestWriteId, :txnId, " + getEpochFn(databaseProduct) + ")";
+  }
+  
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    try {
+      return new MapSqlParameterSource()
+          .addValue("id", id)
+          .addValue("dbName", rqst.getDbname(), Types.VARCHAR)
+          .addValue("tableName", rqst.getTablename(), Types.VARCHAR)
+          .addValue("partition", rqst.getPartitionname(), Types.VARCHAR)
+          .addValue("state", compactionState.getSqlConst(), Types.VARCHAR)
+          .addValue("type", TxnUtils.thriftCompactionType2DbType(rqst.getType()), Types.VARCHAR)
+          .addValue("poolName", rqst.getPoolName(), Types.VARCHAR)
+          .addValue("buckets", rqst.isSetNumberOfBuckets() ? rqst.getNumberOfBuckets() : null, Types.INTEGER)
+          .addValue("orderBy", rqst.getOrderByClause(), Types.VARCHAR)
+          .addValue("tblProperties", rqst.getProperties() == null ? null : new StringableMap(rqst.getProperties()), Types.VARCHAR)
+          .addValue("runAs", rqst.getRunas(), Types.VARCHAR)
+          .addValue("initiatorId", rqst.getInitiatorId(), Types.VARCHAR)
+          .addValue("initiatorVersion", rqst.getInitiatorVersion(), Types.VARCHAR)
+          .addValue("highestWriteId", highestWriteId, Types.BIGINT)
+          .addValue("txnId", txnId, Types.BIGINT);
+    } catch (MetaException e) {
+      throw new MetaWrapperException(e);
+    }
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompletedTxnComponentsCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompletedTxnComponentsCommand.java
new file mode 100644
index 000000000000..9f9086b755db
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertCompletedTxnComponentsCommand.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.commands;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
+import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedBatchCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedCommand;
+import org.springframework.jdbc.core.ParameterizedPreparedStatementSetter;
+
+import java.util.List;
+import java.util.function.Function;
+
+public class InsertCompletedTxnComponentsCommand implements ParameterizedBatchCommand<WriteEventInfo> {
+
+  private final long txnId;
+  private final char isUpdateDelete;
+  private final List<WriteEventInfo> infos;
+
+  public InsertCompletedTxnComponentsCommand(long txnId, char isUpdateDelete, List<WriteEventInfo> infos) {
+    this.txnId = txnId;
+    this.isUpdateDelete = isUpdateDelete;
+    this.infos = infos;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) {
+    return
+        "INSERT INTO \"COMPLETED_TXN_COMPONENTS\" " +
+        "(\"CTC_TXNID\", \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\", \"CTC_WRITEID\", \"CTC_UPDATE_DELETE\") " +
+        "VALUES (?, ?, ?, ?, ?, ?)";
+  }
+
+  @Override
+  public List<WriteEventInfo> getQueryParameters() {
+    return infos;
+  }
+
+  @Override
+  public ParameterizedPreparedStatementSetter<WriteEventInfo> getPreparedStatementSetter() {
+    return (ps, argument) -> {
+      ps.setLong(1, txnId);
+      ps.setString(2, argument.getDatabase());
+      ps.setString(3, argument.getTable());
+      ps.setString(4, argument.getPartition());
+      ps.setLong(5, argument.getWriteId());
+      ps.setString(6, Character.toString(isUpdateDelete));
+    };
+  }
+
+  @Override
+  public Function<Integer, Boolean> resultPolicy() {
+    return ParameterizedCommand.EXACTLY_ONE_ROW;
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertHiveLocksCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertHiveLocksCommand.java
new file mode 100644
index 000000000000..6ef5465616d0
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertHiveLocksCommand.java
@@ -0,0 +1,97 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.commands;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.LockComponent;
+import org.apache.hadoop.hive.metastore.api.LockRequest;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.MetaWrapperException;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedBatchCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedCommand;
+import org.apache.hadoop.hive.metastore.utils.LockTypeUtil;
+import org.springframework.jdbc.core.ParameterizedPreparedStatementSetter;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Function;
+
+import static org.apache.hadoop.hive.metastore.txn.TxnUtils.getEpochFn;
+import static org.apache.hadoop.hive.metastore.txn.entities.LockInfo.LOCK_WAITING;
+
+public class InsertHiveLocksCommand implements ParameterizedBatchCommand<Object[]> {
+  
+  private final LockRequest lockRequest;
+  private final long tempExtLockId;
+
+  public InsertHiveLocksCommand(LockRequest lockRequest, long tempExtLockId) {
+    this.lockRequest = lockRequest;
+    this.tempExtLockId = tempExtLockId;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) {
+    try {
+      //language=SQL
+      return String.format( 
+          "INSERT INTO \"HIVE_LOCKS\" ( " +
+          "\"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\", \"HL_TXNID\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", " +
+          "\"HL_LOCK_STATE\", \"HL_LOCK_TYPE\", \"HL_LAST_HEARTBEAT\", \"HL_USER\", \"HL_HOST\", \"HL_AGENT_INFO\") " +
+          "VALUES (?, ?, ?, ?, ?, ?, ?, ?, %s, ?, ?, ?)", lockRequest.getTxnid() != 0 ? "0" : getEpochFn(databaseProduct));
+    } catch (MetaException e) {
+      throw new MetaWrapperException(e);
+    }
+  }
+
+  @Override
+  public List<Object[]> getQueryParameters() {
+    List<Object[]> params = new ArrayList<>(lockRequest.getComponentSize());
+    long intLockId = 0;
+    for (LockComponent lc : lockRequest.getComponent()) {
+      String lockType = LockTypeUtil.getEncodingAsStr(lc.getType());
+      params.add(new Object[] {tempExtLockId, ++intLockId, lockRequest.getTxnid(), StringUtils.lowerCase(lc.getDbname()),
+          StringUtils.lowerCase(lc.getTablename()), TxnUtils.normalizePartitionCase(lc.getPartitionname()),
+          Character.toString(LOCK_WAITING), lockType, lockRequest.getUser(), lockRequest.getHostname(), lockRequest.getAgentInfo()});
+    }
+    return params;
+  }
+
+  @Override
+  public ParameterizedPreparedStatementSetter<Object[]> getPreparedStatementSetter() {
+    return (ps, argument) -> {
+      ps.setLong(1, (Long)argument[0]);
+      ps.setLong(2, (Long)argument[1]);
+      ps.setLong(3, (Long)argument[2]);
+      ps.setString(4, (String)argument[3]);
+      ps.setString(5, (String)argument[4]);
+      ps.setString(6, (String)argument[5]);
+      ps.setString(7, (String)argument[6]);
+      ps.setString(8, (String)argument[7]);
+      ps.setString(9, (String)argument[8]);
+      ps.setString(10, (String)argument[9]);
+      ps.setString(11, (String)argument[10]);
+    };
+  }
+
+  @Override
+  public Function<Integer, Boolean> resultPolicy() {
+    return ParameterizedCommand.EXACTLY_ONE_ROW;
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertTxnComponentsCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertTxnComponentsCommand.java
new file mode 100644
index 000000000000..2f4da649c5ad
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/InsertTxnComponentsCommand.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.commands;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
+import org.apache.hadoop.hive.metastore.api.DataOperationType;
+import org.apache.hadoop.hive.metastore.api.LockComponent;
+import org.apache.hadoop.hive.metastore.api.LockRequest;
+import org.apache.hadoop.hive.metastore.txn.entities.OperationType;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedBatchCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedCommand;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.springframework.jdbc.core.ParameterizedPreparedStatementSetter;
+
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+public class InsertTxnComponentsCommand implements ParameterizedBatchCommand<Object[]> {
+  
+  private final LockRequest lockRequest;
+  private final Map<Pair<String, String>, Long> writeIds;
+  private final AddDynamicPartitions dynamicPartitions;
+
+  public InsertTxnComponentsCommand(LockRequest lockRequest, Map<Pair<String, String>, Long> writeIds) {
+    this.lockRequest = lockRequest;
+    this.writeIds = writeIds;
+    this.dynamicPartitions = null;
+  }
+
+  public InsertTxnComponentsCommand(AddDynamicPartitions dynamicPartitions) {
+    this.dynamicPartitions = dynamicPartitions;
+    this.lockRequest = null;
+    this.writeIds = null;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) {
+    return "INSERT INTO \"TXN_COMPONENTS\" (" +
+        "\"TC_TXNID\", \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", \"TC_OPERATION_TYPE\", \"TC_WRITEID\")" +
+        " VALUES (?, ?, ?, ?, ?, ?)";
+  }
+
+  @Override
+  public List<Object[]> getQueryParameters() {
+    return dynamicPartitions == null ? getQueryParametersByLockRequest() : getQueryParametersByDynamicPartitions();
+  }
+
+  @Override
+  public ParameterizedPreparedStatementSetter<Object[]> getPreparedStatementSetter() {
+    return (ps, argument) -> {
+      ps.setLong(1, (Long)argument[0]);
+      ps.setString(2, (String)argument[1]);
+      ps.setString(3, (String)argument[2]);
+      ps.setString(4, (String)argument[3]);
+      ps.setString(5, (String)argument[4]);
+      ps.setObject(6, argument[5], Types.BIGINT);
+    };
+  }
+
+  @Override
+  public Function<Integer, Boolean> resultPolicy() {
+    return ParameterizedCommand.EXACTLY_ONE_ROW;
+  }
+  
+  private List<Object[]> getQueryParametersByLockRequest() {
+    assert lockRequest != null;
+    List<Object[]> params = new ArrayList<>(lockRequest.getComponentSize());
+    Set<Pair<String, String>> alreadyAddedTables = new HashSet<>();
+
+    for (LockComponent lc : lockRequest.getComponent()) {
+      if (lc.isSetIsTransactional() && !lc.isIsTransactional()) {
+        //we don't prevent using non-acid resources in a txn, but we do lock them
+        continue;
+      }
+      if (!shouldUpdateTxnComponent(lockRequest.getTxnid(), lockRequest, lc)) {
+        continue;
+      }
+
+      Function<LockComponent, Pair<String, String>> getWriteIdKey = lockComponent ->
+          Pair.of(StringUtils.lowerCase(lockComponent.getDbname()), StringUtils.lowerCase(lockComponent.getTablename()));
+
+      String dbName = StringUtils.lowerCase(lc.getDbname());
+      String tblName = StringUtils.lowerCase(lc.getTablename());
+      String partName = TxnUtils.normalizePartitionCase(lc.getPartitionname());
+      OperationType opType = OperationType.fromDataOperationType(lc.getOperationType());
+      Pair<String, String> writeIdKey = getWriteIdKey.apply(lc);
+
+
+      Predicate<LockComponent> isDynPart = lockComponent -> lockComponent.isSetIsDynamicPartitionWrite() && lockComponent.isIsDynamicPartitionWrite();
+      Set<Pair<String, String>> isDynPartUpdate = lockRequest.getComponent().stream().filter(isDynPart)
+          .filter(lockComponent -> lockComponent.getOperationType() == DataOperationType.UPDATE || lockComponent.getOperationType() == DataOperationType.DELETE)
+          .map(getWriteIdKey)
+          .collect(Collectors.toSet());
+
+      if (isDynPart.test(lc)) {
+        partName = null;
+        if (alreadyAddedTables.contains(writeIdKey)) {
+          continue;
+        }
+        opType = isDynPartUpdate.contains(writeIdKey) ? OperationType.UPDATE : OperationType.INSERT;
+      }
+      Long writeId = writeIds.get(writeIdKey);
+
+      params.add(new Object[]{lockRequest.getTxnid(), dbName, tblName, partName, opType.getSqlConst(), writeId});
+      alreadyAddedTables.add(writeIdKey);
+    }
+    return params;    
+  }
+
+  private List<Object[]> getQueryParametersByDynamicPartitions() {
+    assert dynamicPartitions != null;
+    //for RU this may be null so we should default it to 'u' which is most restrictive
+    OperationType ot = OperationType.UPDATE;
+    if (dynamicPartitions.isSetOperationType()) {
+      ot = OperationType.fromDataOperationType(dynamicPartitions.getOperationType());
+    }
+    
+    List<Object[]> params = new ArrayList<>(dynamicPartitions.getPartitionnamesSize());
+    for (String partName : dynamicPartitions.getPartitionnames()) {
+      params.add(new Object[]{
+          dynamicPartitions.getTxnid(),
+          dynamicPartitions.getDbname().toLowerCase(),
+          dynamicPartitions.getTablename().toLowerCase(),
+          partName,
+          ot.getSqlConst(),
+          dynamicPartitions.getWriteid()
+      });
+    }
+    return params;
+  }
+
+  private boolean shouldUpdateTxnComponent(long txnid, LockRequest rqst, LockComponent lc) {
+    if(!lc.isSetOperationType()) {
+      //request came from old version of the client
+      return true; //this matches old behavior
+    }
+    else {
+      switch (lc.getOperationType()) {
+        case INSERT:
+        case UPDATE:
+        case DELETE:
+          return true;
+        case SELECT:
+          return false;
+        case NO_TXN:
+              /*this constant is a bit of a misnomer since we now always have a txn context.  It
+               just means the operation is such that we don't care what tables/partitions it
+               affected as it doesn't trigger a compaction or conflict detection.  A better name
+               would be NON_TRANSACTIONAL.*/
+          return false;
+        default:
+          //since we have an open transaction, only 4 values above are expected
+          throw new IllegalStateException("Unexpected DataOperationType: " + lc.getOperationType()
+              + " agentInfo=" + rqst.getAgentInfo() + " " + JavaUtils.txnIdToString(txnid));
+      }
+    }
+  }
+  
+}
\ No newline at end of file
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/RemoveCompactionMetricsDataCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveCompactionMetricsDataCommand.java
similarity index 94%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/RemoveCompactionMetricsDataCommand.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveCompactionMetricsDataCommand.java
index 8e07104760d2..a3fd4a41abc2 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/RemoveCompactionMetricsDataCommand.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveCompactionMetricsDataCommand.java
@@ -15,11 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.commands;
 
 import org.apache.hadoop.hive.metastore.DatabaseProduct;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.txn.CompactionMetricsData;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionMetricsData;
 import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedCommand;
 import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
 import org.springframework.jdbc.core.namedparam.SqlParameterSource;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveDuplicateCompleteTxnComponentsCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveDuplicateCompleteTxnComponentsCommand.java
new file mode 100644
index 000000000000..ca481a05c833
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveDuplicateCompleteTxnComponentsCommand.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.commands;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.ParameterizedCommand;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.util.function.Function;
+
+import static org.apache.hadoop.hive.metastore.DatabaseProduct.DbType.MYSQL;
+
+public class RemoveDuplicateCompleteTxnComponentsCommand implements ParameterizedCommand {
+  
+  private RemoveDuplicateCompleteTxnComponentsCommand() {}
+  
+  @Override
+  public Function<Integer, Boolean> resultPolicy() {
+    return null;
+  }
+
+  //language=SQL
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    switch (databaseProduct.dbType) {
+      case MYSQL:
+      case SQLSERVER:
+        return "DELETE tc " +
+            "FROM \"COMPLETED_TXN_COMPONENTS\" tc " +
+            "INNER JOIN (" +
+            "    SELECT \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\"," +
+            "        MAX(\"CTC_WRITEID\") highestWriteId," +
+            "        MAX(CASE WHEN \"CTC_UPDATE_DELETE\" = 'Y' THEN \"CTC_WRITEID\" END) updateWriteId" +
+            "    FROM \"COMPLETED_TXN_COMPONENTS\"" +
+            "    GROUP BY \"CTC_DATABASE\", \"CTC_TABLE\", \"CTC_PARTITION\"" +
+            ") c ON " +
+            "      tc.\"CTC_DATABASE\" = c.\"CTC_DATABASE\" " +
+            "      AND tc.\"CTC_TABLE\" = c.\"CTC_TABLE\"" +
+            "      AND (tc.\"CTC_PARTITION\" = c.\"CTC_PARTITION\" OR (tc.\"CTC_PARTITION\" IS NULL AND c.\"CTC_PARTITION\" IS NULL)) " +
+            "WHERE tc.\"CTC_WRITEID\" < c.\"highestWriteId\" " +
+            (MYSQL == databaseProduct.dbType ?
+                "  AND NOT tc.\"CTC_WRITEID\" <=> c.updateWriteId" :
+                "  AND (tc.\"CTC_WRITEID\" != c.updateWriteId OR c.updateWriteId IS NULL)");
+        
+      case DERBY: 
+      case ORACLE:
+      case POSTGRES:
+      case CUSTOM:
+        String existsSubQuery =   
+          "EXISTS (" +
+          "    SELECT 1" +
+          "    FROM \"COMPLETED_TXN_COMPONENTS\" c" +
+          "    WHERE tc.\"CTC_DATABASE\" = c.\"CTC_DATABASE\"" +
+          "      AND tc.\"CTC_TABLE\" = c.\"CTC_TABLE\"" +
+          "      AND %s" +
+          "      AND (tc.\"CTC_UPDATE_DELETE\" = 'N' OR c.\"CTC_UPDATE_DELETE\" = 'Y')" +
+          "      AND tc.\"CTC_WRITEID\" < c.\"CTC_WRITEID\"" +
+          ")";
+        
+        return new StringBuilder()
+          .append("DELETE FROM \"COMPLETED_TXN_COMPONENTS\" tc WHERE ") 
+          .append(String.format(existsSubQuery, 
+              "tc.\"CTC_PARTITION\" = c.\"CTC_PARTITION\""))
+          .append(" OR ")
+          .append(String.format(existsSubQuery, 
+              "tc.\"CTC_PARTITION\" IS NULL AND c.\"CTC_PARTITION\" IS NULL"))
+          .toString();
+     
+      default:
+        String msg = "Unknown database product: " + databaseProduct.dbType;
+        throw new MetaException(msg);
+    }
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource();
+  }
+  
+  public static RemoveDuplicateCompleteTxnComponentsCommand INSTANCE = new RemoveDuplicateCompleteTxnComponentsCommand();
+  
+}
\ No newline at end of file
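The dialect split above exists because the delete predicate must treat a NULL updateWriteId as "different". MySQL's null-safe <=> operator gives that directly, while the other dialects spell it out. The same predicate expressed in Java, purely as an illustration:

public class NullSafeCompareSketch {
  // True when ctcWriteId is not null-safely equal to updateWriteId,
  // i.e. NOT (a <=> b) in MySQL, or (a != b OR b IS NULL) elsewhere.
  static boolean differsNullSafe(long ctcWriteId, Long updateWriteId) {
    return updateWriteId == null || ctcWriteId != updateWriteId.longValue();
  }
}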
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveTxnsFromMinHistoryLevelCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveTxnsFromMinHistoryLevelCommand.java
new file mode 100644
index 000000000000..2b69264809a6
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveTxnsFromMinHistoryLevelCommand.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.commands;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.txn.TxnHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.ConditionalCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.InClauseBatchCommand;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import java.util.List;
+
+public class RemoveTxnsFromMinHistoryLevelCommand extends InClauseBatchCommand<Long> implements ConditionalCommand {
+
+  public RemoveTxnsFromMinHistoryLevelCommand(List<Long> txnids) {
+    super("DELETE FROM \"MIN_HISTORY_LEVEL\" WHERE \"MHL_TXNID\" IN (:txnIds)",
+        new MapSqlParameterSource().addValue("txnIds", txnids), "txnIds", Long::compareTo);
+  }
+
+  @Override
+  public boolean shouldBeUsed(DatabaseProduct databaseProduct) {
+    return TxnHandler.ConfVars.useMinHistoryLevel();
+  }
+
+  @Override
+  public void onError(DatabaseProduct databaseProduct, Exception e) {
+    if (databaseProduct.isTableNotExistsError(e)) {
+      TxnHandler.ConfVars.setUseMinHistoryLevel(false);
+    }
+  }  
+  
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveWriteIdsFromMinHistoryCommand.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveWriteIdsFromMinHistoryCommand.java
new file mode 100644
index 000000000000..0460af7f38a3
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/commands/RemoveWriteIdsFromMinHistoryCommand.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.commands;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.txn.TxnHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.ConditionalCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.InClauseBatchCommand;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import java.util.List;
+
+public class RemoveWriteIdsFromMinHistoryCommand extends InClauseBatchCommand<Long> implements ConditionalCommand {
+
+  public RemoveWriteIdsFromMinHistoryCommand(List<Long> txnids) {
+    super("DELETE FROM \"MIN_HISTORY_WRITE_ID\" WHERE \"MH_TXNID\" IN (:txnIds)",
+        new MapSqlParameterSource().addValue("txnIds", txnids), "txnIds", Long::compareTo);
+  }
+
+  @Override
+  public boolean shouldBeUsed(DatabaseProduct databaseProduct) {
+    return TxnHandler.ConfVars.useMinHistoryWriteId();
+  }
+
+  @Override
+  public void onError(DatabaseProduct databaseProduct, Exception e) {
+    if (databaseProduct.isTableNotExistsError(e)) {
+      // If the table does not exist anymore, we disable the flag and start working the new way
+      // This enables switching to the new functionality without a restart
+      TxnHandler.ConfVars.setUseMinHistoryWriteId(false);
+    }
+  }
+
+}
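Both MIN_HISTORY commands implement the same fallback idea: run the delete only while the feature flag is on, and flip the flag off permanently when the backing table turns out to be missing, so later callers stop hitting the error. A small stand-alone sketch of that pattern, using an assumed Command interface and flag instead of the real ConditionalCommand plumbing:

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Predicate;

public class ConditionalFallbackSketch {
  interface Command { void run() throws Exception; }   // illustrative stand-in

  static final AtomicBoolean useMinHistoryWriteId = new AtomicBoolean(true);

  static void execute(Command cmd, Predicate<Exception> isTableMissing) {
    if (!useMinHistoryWriteId.get()) {
      return;   // feature already disabled, nothing to clean up
    }
    try {
      cmd.run();
    } catch (Exception e) {
      if (isTableMissing.test(e)) {
        // Table is gone: disable the feature instead of failing every caller (mirrors onError above)
        useMinHistoryWriteId.set(false);
      } else {
        throw new RuntimeException(e);   // unrelated failures still propagate
      }
    }
  }
}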
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AbortCompactionFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AbortCompactionFunction.java
new file mode 100644
index 000000000000..bd4fa91961db
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AbortCompactionFunction.java
@@ -0,0 +1,188 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.metastore.api.AbortCompactResponse;
+import org.apache.hadoop.hive.metastore.api.AbortCompactionRequest;
+import org.apache.hadoop.hive.metastore.api.AbortCompactionResponseElement;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionState;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.jdbc.commands.InsertCompactionInfoCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.DbTimeHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionContext;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryCallProperties;
+import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryFunction;
+import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryHandler;
+import org.apache.thrift.TException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.springframework.transaction.TransactionDefinition.PROPAGATION_REQUIRED;
+
+public class AbortCompactionFunction implements TransactionalFunction<AbortCompactResponse> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AbortCompactionFunction.class);
+
+  public static final String SELECT_COMPACTION_QUEUE_BY_COMPID =
+      "SELECT XX.* FROM ( SELECT " +
+          "   \"CQ_ID\" AS \"CC_ID\", \"CQ_DATABASE\" AS \"CC_DATABASE\", \"CQ_TABLE\" AS \"CC_TABLE\", \"CQ_PARTITION\" AS \"CC_PARTITION\", " +
+          "   \"CQ_STATE\" AS \"CC_STATE\", \"CQ_TYPE\" AS \"CC_TYPE\", \"CQ_TBLPROPERTIES\" AS \"CC_TBLPROPERTIES\", \"CQ_WORKER_ID\" AS \"CC_WORKER_ID\", " +
+          "   \"CQ_START\" AS \"CC_START\", \"CQ_RUN_AS\" AS \"CC_RUN_AS\", \"CQ_HIGHEST_WRITE_ID\" AS \"CC_HIGHEST_WRITE_ID\", \"CQ_META_INFO\" AS \"CC_META_INFO\"," +
+          "   \"CQ_HADOOP_JOB_ID\" AS \"CC_HADOOP_JOB_ID\", \"CQ_ERROR_MESSAGE\" AS \"CC_ERROR_MESSAGE\",  \"CQ_ENQUEUE_TIME\" AS \"CC_ENQUEUE_TIME\"," +
+          "   \"CQ_WORKER_VERSION\" AS \"CC_WORKER_VERSION\", \"CQ_INITIATOR_ID\" AS \"CC_INITIATOR_ID\", \"CQ_INITIATOR_VERSION\" AS \"CC_INITIATOR_VERSION\", " +
+          "   \"CQ_RETRY_RETENTION\" AS \"CC_RETRY_RETENTION\", \"CQ_NEXT_TXN_ID\" AS \"CC_NEXT_TXN_ID\", \"CQ_TXN_ID\" AS \"CC_TXN_ID\", " +
+          "   \"CQ_COMMIT_TIME\" AS \"CC_COMMIT_TIME\", \"CQ_POOL_NAME\" AS \"CC_POOL_NAME\",  " +
+          "   \"CQ_NUMBER_OF_BUCKETS\" AS \"CC_NUMBER_OF_BUCKETS\", \"CQ_ORDER_BY\" AS \"CC_ORDER_BY\" " +
+          "   FROM " +
+          "   \"COMPACTION_QUEUE\" " +
+          "   UNION ALL " +
+          "   SELECT " +
+          "   \"CC_ID\", \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_STATE\", \"CC_TYPE\", " +
+          "   \"CC_TBLPROPERTIES\", \"CC_WORKER_ID\", \"CC_START\", \"CC_RUN_AS\", " +
+          "   \"CC_HIGHEST_WRITE_ID\", \"CC_META_INFO\", \"CC_HADOOP_JOB_ID\", \"CC_ERROR_MESSAGE\", " +
+          "   \"CC_ENQUEUE_TIME\", \"CC_WORKER_VERSION\", \"CC_INITIATOR_ID\", \"CC_INITIATOR_VERSION\", " +
+          "    -1 , \"CC_NEXT_TXN_ID\", \"CC_TXN_ID\", \"CC_NEXT_TXN_ID\", \"CC_POOL_NAME\", " +
+          "   \"CC_NUMBER_OF_BUCKETS\", \"CC_ORDER_BY\" " +
+          "   FROM   " +
+          "   \"COMPLETED_COMPACTIONS\") XX WHERE \"CC_ID\" IN (:ids) ";
+  
+
+  private final AbortCompactionRequest reqst;
+  private final SqlRetryHandler sqlRetryHandler;
+
+  public AbortCompactionFunction(AbortCompactionRequest reqst, SqlRetryHandler sqlRetryHandler) {
+    this.reqst = reqst;
+    this.sqlRetryHandler = sqlRetryHandler;
+  }
+
+  @Override
+  public AbortCompactResponse execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    Map<Long, AbortCompactionResponseElement> abortCompactionResponseElements = new HashMap<>();
+    AbortCompactResponse response = new AbortCompactResponse(new HashMap<>());
+    response.setAbortedcompacts(abortCompactionResponseElements);
+
+    reqst.getCompactionIds().forEach(x -> abortCompactionResponseElements.put(x, getAbortCompactionResponseElement(x,"Error","No Such Compaction Id Available")));
+
+    List<CompactionInfo> eligibleCompactionsToAbort =
+        findEligibleCompactionsToAbort(jdbcResource, abortCompactionResponseElements, reqst.getCompactionIds());
+    
+    CompactionAborter aborter = new CompactionAborter(jdbcResource);    
+    for (CompactionInfo compactionInfo : eligibleCompactionsToAbort) {
+      try {
+        AbortCompactionResponseElement responseElement = sqlRetryHandler.executeWithRetry(
+            new SqlRetryCallProperties().withCallerId("abortCompaction"), 
+            aborter.withCompactionInfo(compactionInfo));
+        abortCompactionResponseElements.put(compactionInfo.id, responseElement);
+      } catch (TException e) {
+        throw new MetaException(e.getMessage());
+      }
+    }
+    return response;
+  }
+
+  private List<CompactionInfo> findEligibleCompactionsToAbort(MultiDataSourceJdbcResource jdbcResource,
+      Map<Long, AbortCompactionResponseElement> abortCompactionResponseElements, List<Long> requestedCompId) {
+
+    return jdbcResource.getJdbcTemplate().query(
+        SELECT_COMPACTION_QUEUE_BY_COMPID,
+        new MapSqlParameterSource().addValue("ids", requestedCompId),
+        rs -> {
+          List<CompactionInfo> compactionInfoList = new ArrayList<>();
+          while (rs.next()) {
+            char compState = rs.getString(5).charAt(0);
+            long compID = rs.getLong(1);
+            if (CompactionState.INITIATED.equals(CompactionState.fromSqlConst(compState))) {
+              compactionInfoList.add(CompactionInfo.loadFullFromCompactionQueue(rs));
+            } else {
+              abortCompactionResponseElements.put(compID, getAbortCompactionResponseElement(compID,"Error",
+                  "Error while aborting compaction as compaction is in state-" + CompactionState.fromSqlConst(compState)));
+            }
+          }
+          return compactionInfoList;
+        });    
+  }
+
+  private AbortCompactionResponseElement getAbortCompactionResponseElement(long compactionId, String status, String message) {
+    AbortCompactionResponseElement resEle = new AbortCompactionResponseElement(compactionId);
+    resEle.setMessage(message);
+    resEle.setStatus(status);
+    return resEle;
+  }
+
+
+  private class CompactionAborter implements SqlRetryFunction<AbortCompactionResponseElement> {
+
+    private final MultiDataSourceJdbcResource jdbcResource;
+    private CompactionInfo compactionInfo;
+
+    public CompactionAborter(MultiDataSourceJdbcResource jdbcResource) {
+      this.jdbcResource = jdbcResource;
+    }
+
+    public CompactionAborter withCompactionInfo(CompactionInfo compactionInfo) {
+      this.compactionInfo = compactionInfo;
+      return this;
+    }
+
+    @Override
+    public AbortCompactionResponseElement execute() {
+      try (TransactionContext context = jdbcResource.getTransactionManager().getNewTransaction(PROPAGATION_REQUIRED)) {
+        compactionInfo.state = TxnStore.ABORTED_STATE;
+        compactionInfo.errorMessage = "Compaction Aborted by Abort Compaction request.";
+        int updCount;
+        try {
+          updCount = jdbcResource.execute(new InsertCompactionInfoCommand(compactionInfo, jdbcResource.execute(new DbTimeHandler()).getTime()));
+        } catch (Exception e) {
+          LOG.error("Unable to update compaction record: {}.", compactionInfo);
+          return getAbortCompactionResponseElement(compactionInfo.id, "Error",
+              "Error while aborting compaction:Unable to update compaction record in COMPLETED_COMPACTIONS");
+        }
+        LOG.debug("Inserted {} entries into COMPLETED_COMPACTIONS", updCount);
+        try {
+          updCount = jdbcResource.getJdbcTemplate().update("DELETE FROM \"COMPACTION_QUEUE\" WHERE \"CQ_ID\" = :id",
+              new MapSqlParameterSource().addValue("id", compactionInfo.id));
+          if (updCount != 1) {
+            LOG.error("Unable to update compaction record: {}. updCnt={}", compactionInfo, updCount);
+            return getAbortCompactionResponseElement(compactionInfo.id, "Error",
+                "Error while aborting compaction: Unable to update compaction record in COMPACTION_QUEUE");
+          } else {
+            jdbcResource.getTransactionManager().commit(context);
+            return getAbortCompactionResponseElement(compactionInfo.id, "Success",
+                "Successfully aborted compaction");
+          }
+        } catch (DataAccessException e) {
+          return getAbortCompactionResponseElement(compactionInfo.id, "Error",
+              "Error while aborting compaction:" + e.getMessage());
+        }
+      }
+    }
+
+  }
+
+}
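Each eligible compaction above is aborted inside its own retried unit of work via SqlRetryHandler. The retry idea itself, reduced to a generic helper (assumed semantics; the real handler only retries recoverable SQL errors and applies its own backoff and logging):

import java.util.concurrent.Callable;

public class RetrySketch {
  // Runs a unit of work up to maxAttempts times, rethrowing the last failure.
  static <T> T withRetry(Callable<T> work, int maxAttempts) throws Exception {
    Exception last = null;
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        return work.call();
      } catch (Exception e) {
        last = e;   // the real handler would only loop on retriable SQL errors
      }
    }
    throw last;
  }
}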
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AbortTxnFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AbortTxnFunction.java
new file mode 100644
index 000000000000..72c19e4c6e72
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AbortTxnFunction.java
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.metastore.api.AbortTxnRequest;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.api.TxnType;
+import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.commands.DeleteReplTxnMapEntryCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.FindTxnStateHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.GetOpenTxnTypeAndLockHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.TargetTxnIdListHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.Collections;
+import java.util.List;
+
+public class AbortTxnFunction implements TransactionalFunction<TxnType> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AbortTxnFunction.class);
+
+  public AbortTxnFunction(AbortTxnRequest rqst) {
+    this.rqst = rqst;
+  }
+
+  private final AbortTxnRequest rqst;
+  
+  @Override
+  public TxnType execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException, NoSuchTxnException, TxnAbortedException {
+    long txnid = rqst.getTxnid();
+    TxnErrorMsg txnErrorMsg = TxnErrorMsg.NONE;
+    long sourceTxnId = -1;
+    boolean isReplayedReplTxn = TxnType.REPL_CREATED.equals(rqst.getTxn_type());
+    boolean isHiveReplTxn = rqst.isSetReplPolicy() && TxnType.DEFAULT.equals(rqst.getTxn_type());
+    if (isReplayedReplTxn) {
+      assert (rqst.isSetReplPolicy());
+      sourceTxnId = rqst.getTxnid();
+      List<Long> targetTxnIds = jdbcResource.execute(new TargetTxnIdListHandler(rqst.getReplPolicy(), Collections.singletonList(sourceTxnId)));
+      if (targetTxnIds.isEmpty()) {
+        // Idempotent case where txn was already closed or abort txn event received without
+        // corresponding open txn event.
+        LOG.info("Target txn id is missing for source txn id : {} and repl policy {}", sourceTxnId,
+            rqst.getReplPolicy());
+        return null;
+      }
+      assert targetTxnIds.size() == 1;
+      txnid = targetTxnIds.get(0);
+    }
+
+    TxnType txnType = jdbcResource.execute(new GetOpenTxnTypeAndLockHandler(jdbcResource.getSqlGenerator(), txnid));
+    if (txnType == null) {
+      TxnStatus status = jdbcResource.execute(new FindTxnStateHandler(txnid));
+      if (status == TxnStatus.ABORTED) {
+        if (isReplayedReplTxn) {
+          // in case of replication, idempotency is taken care of by getTargetTxnId
+          LOG.warn("Invalid state ABORTED for transactions started using replication replay task");
+          jdbcResource.execute(new DeleteReplTxnMapEntryCommand(sourceTxnId, rqst.getReplPolicy()));
+        }
+        LOG.info("abortTxn({}) requested by it is already {}", JavaUtils.txnIdToString(txnid), TxnStatus.ABORTED);
+        return null;
+      }
+      TxnUtils.raiseTxnUnexpectedState(status, txnid);
+    }
+
+    if (isReplayedReplTxn) {
+      txnErrorMsg = TxnErrorMsg.ABORT_REPLAYED_REPL_TXN;
+    } else if (isHiveReplTxn) {
+      txnErrorMsg = TxnErrorMsg.ABORT_DEFAULT_REPL_TXN;
+    } else if (rqst.isSetErrorCode()) {
+      txnErrorMsg = TxnErrorMsg.getTxnErrorMsg(rqst.getErrorCode());
+    }
+
+    new AbortTxnsFunction(Collections.singletonList(txnid), false, true,
+        isReplayedReplTxn, txnErrorMsg).execute(jdbcResource);
+
+    if (isReplayedReplTxn) {
+      jdbcResource.execute(new DeleteReplTxnMapEntryCommand(sourceTxnId, rqst.getReplPolicy()));
+    }
+    
+    return txnType;
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AbortTxnsFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AbortTxnsFunction.java
new file mode 100644
index 000000000000..c280b85222c6
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AbortTxnsFunction.java
@@ -0,0 +1,209 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.common.repl.ReplConst;
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.commands.RemoveTxnsFromMinHistoryLevelCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.commands.RemoveWriteIdsFromMinHistoryCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.GetDatabaseIdHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.jdbc.UncategorizedSQLException;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hive.metastore.txn.TxnUtils.executeQueriesInBatch;
+import static org.apache.hadoop.hive.metastore.txn.TxnUtils.executeQueriesInBatchNoCount;
+import static org.apache.hadoop.hive.metastore.txn.TxnUtils.getEpochFn;
+
+public class AbortTxnsFunction implements TransactionalFunction<Integer> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AbortTxnsFunction.class);
+
+  private final List<Long> txnids;
+  private final boolean checkHeartbeat;
+  private final boolean skipCount;
+  private final boolean isReplReplayed;
+  private final TxnErrorMsg txnErrorMsg;
+
+  /**
+   * TODO: expose this as an operation to client.  Useful for streaming API to abort all remaining
+   * transactions in a batch on IOExceptions.
+   * Caller must rollback the transaction if not all transactions were aborted since this will not
+   * attempt to delete associated locks in this case.
+   *
+   * @param txnids list of transactions to abort
+   * @param checkHeartbeat value used by {@code org.apache.hadoop.hive.metastore.txn.TxnHandler#performTimeOuts()}
+   *                       to ensure this doesn't abort txns that were heartbeated between the #performTimeOuts()
+   *                       select and this operation.
+   * @param skipCount If true, the method always returns 0, otherwise returns the number of actually aborted txns
+   */
+  public AbortTxnsFunction(List<Long> txnids, boolean checkHeartbeat, boolean skipCount, boolean isReplReplayed,
+                           TxnErrorMsg txnErrorMsg) {
+    this.txnids = txnids;
+    this.checkHeartbeat = checkHeartbeat;
+    this.skipCount = skipCount;
+    this.isReplReplayed = isReplReplayed;
+    this.txnErrorMsg = txnErrorMsg;
+  }
+
+  /**
+   * @param jdbcResource A {@link MultiDataSourceJdbcResource} instance responsible for providing all the necessary 
+   *                     resources to be able to perform transactional database calls.
+   * @return 0 if skipCount is true, the number of aborted transactions otherwise
+   */
+  @Override
+  public Integer execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    if (txnids.isEmpty()) {
+      return 0;
+    }
+    Configuration conf = jdbcResource.getConf();
+    Collections.sort(txnids);    
+    LOG.debug("Aborting {} transaction(s) {} due to {}", txnids.size(), txnids, txnErrorMsg);
+    
+    int maxBatchSize = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.JDBC_MAX_BATCH_SIZE);    
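+    // Remove the txns being aborted from the MIN_HISTORY_LEVEL and MIN_HISTORY_WRITE_ID tables
+    // first, so that they no longer contribute to the minimum open txn/write-id bookkeeping.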
+    jdbcResource.execute(new RemoveTxnsFromMinHistoryLevelCommand(txnids));
+    jdbcResource.execute(new RemoveWriteIdsFromMinHistoryCommand(txnids));
+    
+    Connection dbConn = jdbcResource.getConnection();
+    try {
+      DatabaseProduct dbProduct = jdbcResource.getDatabaseProduct();
+      //This is an update statement, so at any isolation level it takes write locks and thus blocks
+      //all other operations using SELECT ... FOR UPDATE on the same TXNS rows.
+      List<String> queries = new ArrayList<>();
+      StringBuilder prefix = new StringBuilder();
+      StringBuilder suffix = new StringBuilder();
+
+      // add update txns queries to query list
+      prefix.append("UPDATE \"TXNS\" SET \"TXN_STATE\" = ").append(TxnStatus.ABORTED)
+          .append(" , \"TXN_META_INFO\" = ").append(txnErrorMsg.toSqlString())
+          .append(" WHERE \"TXN_STATE\" = ").append(TxnStatus.OPEN).append(" AND ");
+      if (checkHeartbeat) {
+        suffix.append(" AND \"TXN_LAST_HEARTBEAT\" < ")
+            .append(getEpochFn(dbProduct)).append("-")
+            .append(MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.TXN_TIMEOUT, TimeUnit.MILLISECONDS));
+      }
+      TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "\"TXN_ID\"", true, false);
+      int numUpdateQueries = queries.size();
+
+      // add delete hive locks queries to query list
+      prefix.setLength(0);
+      suffix.setLength(0);
+      prefix.append("DELETE FROM \"HIVE_LOCKS\" WHERE ");
+      TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "\"HL_TXNID\"", false, false);
+
+      //If this abort is for REPL_CREATED TXN initiated outside the replication flow, then clean the corresponding entry
+      //from REPL_TXN_MAP and mark that database as replication incompatible.
+      if (!isReplReplayed) {
+        for (String database : getDbNamesForReplayedTxns(jdbcResource, dbConn, txnids)) {
+          markDbAsReplIncompatible(jdbcResource, database);
+        }
+        // Delete mapping from REPL_TXN_MAP if it exists.
+        prefix.setLength(0);
+        suffix.setLength(0);
+        prefix.append("DELETE FROM \"REPL_TXN_MAP\" WHERE ");
+        TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "\"RTM_TARGET_TXN_ID\"", false, false);
+      }
+
+      int numAborted = 0;
+      try (Statement stmt = dbConn.createStatement()) {
+        // execute all queries in the list in one batch
+        if (skipCount) {
+          executeQueriesInBatchNoCount(dbProduct, stmt, queries, maxBatchSize);
+        } else {
+          List<Integer> affectedRowsByQuery = executeQueriesInBatch(stmt, queries, maxBatchSize);
+          numAborted = getUpdateCount(numUpdateQueries, affectedRowsByQuery);
+        }
+      }
+
+      if (MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.METASTORE_ACIDMETRICS_EXT_ON)) {
+        Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_ABORTED_TXNS).inc(txnids.size());
+      }
+      LOG.warn("Aborted {} transaction(s) {} due to {}", txnids.size(), txnids, txnErrorMsg);
+      return numAborted;
+    } catch (SQLException e) {
+      throw new UncategorizedSQLException(null, null, e);
+    }
+  }
+
+  private Set<String> getDbNamesForReplayedTxns(MultiDataSourceJdbcResource jdbcResource, Connection dbConn,
+                                                List<Long> targetTxnIds) throws SQLException {
+    Set<String> dbNames = new HashSet<>();
+    if (targetTxnIds.isEmpty()) {
+      return dbNames;
+    }
+    List<String> inQueries = new ArrayList<>();
+    StringBuilder prefix = new StringBuilder();
+    prefix.append("SELECT \"RTM_REPL_POLICY\" FROM \"REPL_TXN_MAP\" WHERE ");
+    TxnUtils.buildQueryWithINClause(jdbcResource.getConf(), inQueries, prefix, new StringBuilder(), targetTxnIds,
+        "\"RTM_TARGET_TXN_ID\"", false, false);
+    for (String query : inQueries) {
+      LOG.debug("Going to execute select <{}>", query);
+      try (PreparedStatement pst = jdbcResource.getSqlGenerator().prepareStmtWithParameters(dbConn, query, null);
+           ResultSet rs = pst.executeQuery()) {
+        while (rs.next()) {
+          dbNames.add(MetaStoreUtils.getDbNameFromReplPolicy(rs.getString(1)));
+        }
+      }
+    }
+    return dbNames;
+  }
+
+  private void markDbAsReplIncompatible(MultiDataSourceJdbcResource jdbcResource, String database) throws SQLException, MetaException {
+    try (Statement stmt = jdbcResource.getConnection().createStatement()){
+      String catalog = MetaStoreUtils.getDefaultCatalog(jdbcResource.getConf());
+      String s = jdbcResource.getSqlGenerator().getDbProduct().getPrepareTxnStmt();
+      if (s != null) {
+        stmt.execute(s);
+      }
+      long dbId = jdbcResource.execute(new GetDatabaseIdHandler(database, catalog));
+      new UpdataDatabasePropFunction(database, dbId, ReplConst.REPL_INCOMPATIBLE, ReplConst.TRUE).execute(jdbcResource);
+    }
+  }
+
+  private int getUpdateCount(int numUpdateQueries, List<Integer> affectedRowsByQuery) {
+    return affectedRowsByQuery.stream()
+        .limit(numUpdateQueries)
+        .mapToInt(Integer::intValue)
+        .sum();
+  }
+
+
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AcquireTxnLockFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AcquireTxnLockFunction.java
new file mode 100644
index 000000000000..eb2d47eea4c0
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AcquireTxnLockFunction.java
@@ -0,0 +1,49 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.Statement;
+
+public class AcquireTxnLockFunction implements TransactionalFunction<Void> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AcquireTxnLockFunction.class.getName());
+  
+  private final boolean shared;
+
+  public AcquireTxnLockFunction(boolean shared) {
+    this.shared = shared;
+  }
+
+  @Override
+  public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
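+    // The SQL generator returns a database-specific statement that takes the global transaction
+    // lock in either shared or exclusive mode, depending on the 'shared' flag.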
+    String sqlStmt = jdbcResource.getSqlGenerator().createTxnLockStatement(shared);
+    jdbcResource.getJdbcTemplate().getJdbcTemplate().execute((Statement stmt) -> {
+      stmt.execute(sqlStmt);
+      return null;
+    });
+    LOG.debug("TXN lock locked by '{}' in mode {}", JavaUtils.hostname(), shared);
+    return null;
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AllocateTableWriteIdsFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AllocateTableWriteIdsFunction.java
new file mode 100644
index 000000000000..247c9345ea29
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/AllocateTableWriteIdsFunction.java
@@ -0,0 +1,348 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.metastore.MetaStoreListenerNotifier;
+import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener;
+import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsRequest;
+import org.apache.hadoop.hive.metastore.api.AllocateTableWriteIdsResponse;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.TxnToWriteId;
+import org.apache.hadoop.hive.metastore.api.TxnType;
+import org.apache.hadoop.hive.metastore.events.AllocWriteIdEvent;
+import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.commands.AddWriteIdsToTxnToWriteIdCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.TargetTxnIdListHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.InClauseBatchCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.jdbc.UncategorizedSQLException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class AllocateTableWriteIdsFunction implements TransactionalFunction<AllocateTableWriteIdsResponse> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(AllocateTableWriteIdsFunction.class);
+
+  private final AllocateTableWriteIdsRequest rqst;
+  private final List<TransactionalMetaStoreEventListener> transactionalListeners;
+
+  public AllocateTableWriteIdsFunction(AllocateTableWriteIdsRequest rqst, List<TransactionalMetaStoreEventListener> transactionalListeners) {
+    this.rqst = rqst;
+    this.transactionalListeners = transactionalListeners;
+  }
+
+  @Override
+  public AllocateTableWriteIdsResponse execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    List<Long> txnIds;
+    String dbName = rqst.getDbName().toLowerCase();
+    String tblName = rqst.getTableName().toLowerCase();
+    boolean shouldReallocate = rqst.isReallocate();
+    Connection dbConn = jdbcResource.getConnection();
+    List<TxnToWriteId> txnToWriteIds = new ArrayList<>();
+    List<TxnToWriteId> srcTxnToWriteIds = null;
+
+    if (rqst.isSetReplPolicy()) {
+      srcTxnToWriteIds = rqst.getSrcTxnToWriteIdList();
+      List<Long> srcTxnIds = new ArrayList<>();
+      assert (rqst.isSetSrcTxnToWriteIdList());
+      assert (!rqst.isSetTxnIds());
+      assert (!srcTxnToWriteIds.isEmpty());
+
+      for (TxnToWriteId txnToWriteId : srcTxnToWriteIds) {
+        srcTxnIds.add(txnToWriteId.getTxnId());
+      }
+      txnIds = jdbcResource.execute(new TargetTxnIdListHandler(rqst.getReplPolicy(), srcTxnIds));
+      if (srcTxnIds.size() != txnIds.size()) {
+        // Idempotent case where txn was already closed but gets allocate write id event.
+        // So, just ignore it and return empty list.
+        LOG.info("Idempotent case: Target txn id is missing for source txn id : {} and repl policy {}", srcTxnIds,
+            rqst.getReplPolicy());
+        return new AllocateTableWriteIdsResponse(txnToWriteIds);
+      }
+    } else {
+      assert (!rqst.isSetSrcTxnToWriteIdList());
+      assert (rqst.isSetTxnIds());
+      txnIds = rqst.getTxnIds();
+    }
+
+    //Easiest check, since we can't tell whether we are handling a singleton list or a list with multiple txn ids.
+    if (txnIds.size() > 1) {
+      Collections.sort(txnIds); //easier to read logs and for assumption done in replication flow
+    }
+
+    // Check if all the input txns are in valid state.
+    // Write IDs should be allocated only for open and not read-only transactions.
+    if (!isTxnsOpenAndNotReadOnly(jdbcResource, txnIds)) {
+      String errorMsg = "Write ID allocation on " + TableName.getDbTable(dbName, tblName)
+          + " failed for input txns: "
+          + getAbortedAndReadOnlyTxns(jdbcResource, txnIds)
+          + getCommittedTxns(jdbcResource, txnIds);
+      LOG.error(errorMsg);
+
+      throw new IllegalStateException("Write ID allocation failed on " + TableName.getDbTable(dbName, tblName)
+          + " as not all input txns in open state or read-only");
+    }
+
+    List<String> queries = new ArrayList<>();
+    StringBuilder prefix = new StringBuilder();
+    StringBuilder suffix = new StringBuilder();
+    long writeId;
+    int allocatedTxnsCount = 0;
+    List<String> params = Arrays.asList(dbName, tblName);
+
+    if (shouldReallocate) {
+      // during query recompilation after lock acquisition, it is important to reallocate new writeIds
+      // to ensure writeIds are committed in increasing order.
+      jdbcResource.execute(new InClauseBatchCommand<>(
+          "DELETE FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_DATABASE\" = :dbName AND \"T2W_TABLE\" = :tableName AND " +
+              "\"T2W_TXNID\" IN (:txnIds)", 
+          new MapSqlParameterSource()
+              .addValue("dbName", dbName)
+              .addValue("tableName", tblName)
+              .addValue("txnIds", txnIds),
+          "txnIds", Long::compareTo));
+    } else {
+      // Traverse TXN_TO_WRITE_ID to see if any of the input txns already have a write id allocated
+      // for the same db.table. If so, reuse it; otherwise a new one has to be allocated.
+      // The write id would already be allocated in case of multi-statement txns, where the
+      // first write on a table allocates the write id and the rest of the writes re-use it.
+      prefix.append("SELECT \"T2W_TXNID\", \"T2W_WRITEID\" FROM \"TXN_TO_WRITE_ID\" WHERE")
+          .append(" \"T2W_DATABASE\" = ? AND \"T2W_TABLE\" = ? AND ");
+      TxnUtils.buildQueryWithINClause(jdbcResource.getConf(), queries, prefix, suffix,
+          txnIds, "\"T2W_TXNID\"", false, false);
+      for (String query : queries) {
+        try (PreparedStatement pStmt = jdbcResource.getSqlGenerator().prepareStmtWithParameters(dbConn, query, params)) {
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Going to execute query <" + query.replace("?", "'{}'") + ">", dbName, tblName);
+          }
+          try (ResultSet rs = pStmt.executeQuery()) {
+            while (rs.next()) {
+              // If table write ID is already allocated for the given transaction, then just use it
+              long txnId = rs.getLong(1);
+              writeId = rs.getLong(2);
+              txnToWriteIds.add(new TxnToWriteId(txnId, writeId));
+              allocatedTxnsCount++;
+              LOG.info("Reused already allocated writeID: {} for txnId: {}", writeId, txnId);
+            }            
+          }          
+        } catch (SQLException e) {
+          throw new UncategorizedSQLException(null, null, e);
+        }
+      }
+    }
+
+    // Batch allocation should always happen atomically. Either write ids for all txns is allocated or none.
+    long numOfWriteIds = txnIds.size();
+    assert ((allocatedTxnsCount == 0) || (numOfWriteIds == allocatedTxnsCount));
+    if (allocatedTxnsCount == numOfWriteIds) {
+      // If all the txns in the list have pre-allocated write ids for the given table, then just return.
+      // This is for idempotent case.
+      return new AllocateTableWriteIdsResponse(txnToWriteIds);
+    }
+
+    long srcWriteId = 0;
+    if (rqst.isSetReplPolicy()) {
+      // In replication flow, we always need to allocate write ID equal to that of source.
+      assert (srcTxnToWriteIds != null);
+      srcWriteId = srcTxnToWriteIds.get(0).getWriteId();
+    }
+
+
+    // There are some txns in the list which do not have a write id allocated yet, so go ahead and allocate them.
+    // Get the next write id for the given table and update it with new next write id.
+    // This is select for update query which takes a lock if the table entry is already there in NEXT_WRITE_ID
+    String query = jdbcResource.getSqlGenerator().addForUpdateClause(
+        "SELECT \"NWI_NEXT\" FROM \"NEXT_WRITE_ID\" WHERE \"NWI_DATABASE\" = :dbName AND \"NWI_TABLE\" = :tableName");
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Going to execute query {}", query);
+    }
+    
+    Long nextWriteId = jdbcResource.getJdbcTemplate().query(query, 
+        new MapSqlParameterSource()
+            .addValue("dbName", dbName)
+            .addValue("tableName", tblName),
+        (ResultSet rs) -> rs.next() ? rs.getLong(1) : null);
+    
+    if (nextWriteId == null) {
+      query = "INSERT INTO \"NEXT_WRITE_ID\" (\"NWI_DATABASE\", \"NWI_TABLE\", \"NWI_NEXT\") " +
+          "VALUES (:dbName, :tableName, :nextId)";
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Going to execute query {}", query);
+      }
+      
+      // First allocation of write id should add the table to the next_write_id meta table
+      // The initial value for write id should be 1 and hence we add 1 with number of write ids allocated here
+      // For repl flow, we need to force set the incoming write id.
+      writeId = (srcWriteId > 0) ? srcWriteId : 1;
+      jdbcResource.getJdbcTemplate().update(query,
+          new MapSqlParameterSource()
+              .addValue("dbName", dbName)
+              .addValue("tableName", tblName)
+              .addValue("nextId", writeId + numOfWriteIds));      
+    } else {
+      query = "UPDATE \"NEXT_WRITE_ID\" SET \"NWI_NEXT\" = :nextId WHERE \"NWI_DATABASE\" = :dbName AND \"NWI_TABLE\" = :tableName";
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Going to execute query {}", query);
+      }
+
+      writeId = (srcWriteId > 0) ? srcWriteId : nextWriteId;
+      // Update the NEXT_WRITE_ID for the given table after incrementing by number of write ids allocated
+      jdbcResource.getJdbcTemplate().update(query,
+          new MapSqlParameterSource()
+              .addValue("dbName", dbName)
+              .addValue("tableName", tblName)
+              .addValue("nextId", writeId + numOfWriteIds));
+
+      // For repl flow, if the source write id is mismatching with target next write id, then current
+      // metadata in TXN_TO_WRITE_ID is stale for this table and hence need to clean-up TXN_TO_WRITE_ID.
+      // This is possible in case of first incremental repl after bootstrap where concurrent write
+      // and drop table was performed at source during bootstrap dump.
+      if ((srcWriteId > 0) && (srcWriteId != nextWriteId)) {
+        query = "DELETE FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_DATABASE\" = :dbName AND \"T2W_TABLE\" = :tableName";
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Going to execute query {}", query);
+        }
+
+        jdbcResource.getJdbcTemplate().update(query,
+            new MapSqlParameterSource()
+                .addValue("dbName", dbName)
+                .addValue("tableName", tblName));
+      }
+    }
+
+    // Map the newly allocated write ids against the list of txns which don't have pre-allocated write ids
+    jdbcResource.execute(new AddWriteIdsToTxnToWriteIdCommand(dbName, tblName, writeId, txnIds, txnToWriteIds));
+
+    if (transactionalListeners != null) {
+      MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+          EventMessage.EventType.ALLOC_WRITE_ID,
+          new AllocWriteIdEvent(txnToWriteIds, dbName, tblName),
+          dbConn, jdbcResource.getSqlGenerator());
+    }
+
+    LOG.info("Allocated write ids for dbName={}, tblName={} (txnIds: {})", dbName, tblName, rqst.getTxnIds());
+    return new AllocateTableWriteIdsResponse(txnToWriteIds);
+  }
+
+  /**
+   * Checks if all the txns in the list are in open state and not read-only.
+   * @param txnIds list of txns to be evaluated for open state/read-only status
+   * @return true if all the txns are in open state and not read-only, false otherwise
+   */
+  private boolean isTxnsOpenAndNotReadOnly(MultiDataSourceJdbcResource jdbcResource, List<Long> txnIds) {
+    List<String> queries = new ArrayList<>();
+    StringBuilder prefix = new StringBuilder();
+
+    // Get the count of txns from the given list that are in open state and not read-only.
+    // If the returned count is same as the input number of txns, then all txns are in open state and not read-only.
+    prefix.append("SELECT COUNT(*) FROM \"TXNS\" WHERE \"TXN_STATE\" = ").append(TxnStatus.OPEN)
+        .append(" AND \"TXN_TYPE\" != ").append(TxnType.READ_ONLY.getValue()).append(" AND ");
+
+    TxnUtils.buildQueryWithINClause(jdbcResource.getConf(), queries, prefix, new StringBuilder(),
+        txnIds, "\"TXN_ID\"", false, false);
+
+    AtomicLong count = new AtomicLong(0);
+    for (String query : queries) {
+      LOG.debug("Going to execute query <{}>", query);
+      jdbcResource.getJdbcTemplate().query(query, rs -> {
+        while (rs.next()) {
+          count.set(count.get() + rs.getLong(1));
+        }
+        return null;
+      });
+    }
+    return count.get() == txnIds.size();
+  }
+
+  /**
+   * Get txns from the list that are either aborted or read-only.
+   * @param txnIds list of txns to be evaluated for aborted state/read-only status
+   */
+  private String getAbortedAndReadOnlyTxns(MultiDataSourceJdbcResource jdbcResource, List<Long> txnIds) {
+    List<String> queries = new ArrayList<>();
+    StringBuilder prefix = new StringBuilder();
+
+    // Check if any of the txns in the list are either aborted or read-only.
+    prefix.append("SELECT \"TXN_ID\", \"TXN_STATE\", \"TXN_TYPE\" FROM \"TXNS\" WHERE ");
+    TxnUtils.buildQueryWithINClause(jdbcResource.getConf(), queries, prefix, new StringBuilder(),
+        txnIds, "\"TXN_ID\"", false, false);
+    StringBuilder txnInfo = new StringBuilder();
+
+    for (String query : queries) {
+      LOG.debug("Going to execute query <{}>", query);
+      jdbcResource.getJdbcTemplate().query(query, rs -> {
+        while (rs.next()) {
+          long txnId = rs.getLong(1);
+          TxnStatus txnState = TxnStatus.fromString(rs.getString(2));
+          TxnType txnType = TxnType.findByValue(rs.getInt(3));
+
+          if (txnState != TxnStatus.OPEN) {
+            txnInfo.append("{").append(txnId).append(",").append(txnState).append("}");
+          } else if (txnType == TxnType.READ_ONLY) {
+            txnInfo.append("{").append(txnId).append(",read-only}");
+          }
+        }
+        return null;
+      });
+    }
+    return txnInfo.toString();
+  }
+
+  /**
+   * Get txns from the list that are committed.
+   * @param txnIds list of txns to be evaluated for committed state
+   */
+  private String getCommittedTxns(MultiDataSourceJdbcResource jdbcResource, List<Long> txnIds) {
+    List<String> queries = new ArrayList<>();
+    StringBuilder prefix = new StringBuilder();
+
+    // Check if any of the txns in the list are committed.
+    prefix.append("SELECT \"CTC_TXNID\" FROM \"COMPLETED_TXN_COMPONENTS\" WHERE ");
+    TxnUtils.buildQueryWithINClause(jdbcResource.getConf(), queries, prefix, new StringBuilder(),
+        txnIds, "\"CTC_TXNID\"", false, false);
+    StringBuilder txnInfo = new StringBuilder();
+
+    for (String query : queries) {
+      LOG.debug("Going to execute query <{}>", query);
+      jdbcResource.getJdbcTemplate().query(query, rs -> {
+        while (rs.next()) {
+          long txnId = rs.getLong(1);
+          txnInfo.append("{").append(txnId).append(",c}");
+        }          
+        return null;
+      });
+    }
+    return txnInfo.toString();
+  }
+  
+}
\ No newline at end of file
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CheckLockFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CheckLockFunction.java
new file mode 100644
index 000000000000..af05e1fdd292
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CheckLockFunction.java
@@ -0,0 +1,342 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
+import org.apache.hadoop.hive.metastore.api.LockResponse;
+import org.apache.hadoop.hive.metastore.api.LockState;
+import org.apache.hadoop.hive.metastore.api.LockType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.txn.MetaWrapperException;
+import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.entities.LockInfo;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.GetLocksByLockId;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.apache.hadoop.hive.metastore.utils.LockTypeUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.hive.metastore.txn.TxnUtils.getEpochFn;
+import static org.apache.hadoop.hive.metastore.txn.entities.LockInfo.LOCK_ACQUIRED;
+
+public class CheckLockFunction implements TransactionalFunction<LockResponse> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(CheckLockFunction.class);
+
+  private static final String EXCL_CTAS_ERR_MSG =
+      "Failed to initiate a concurrent CTAS operation with the same table name, lockInfo : %s";
+  private static final String ZERO_WAIT_READ_ERR_MSG = "Unable to acquire read lock due to an existing exclusive lock {%s}";
+  
+  private final long extLockId;
+  private final long txnId;
+  private final boolean zeroWaitReadEnabled;
+  private final boolean isExclusiveCTAS;
+
+  public CheckLockFunction(long extLockId, long txnId, 
+                           boolean zeroWaitReadEnabled, boolean isExclusiveCTAS) {
+    this.extLockId = extLockId;
+    this.txnId = txnId;
+    this.zeroWaitReadEnabled = zeroWaitReadEnabled;
+    this.isExclusiveCTAS = isExclusiveCTAS;
+  }
+
+  @SuppressWarnings("squid:S2583")
+  @Override
+  public LockResponse execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException, NoSuchTxnException, TxnAbortedException, NoSuchLockException {
+    LockResponse response = new LockResponse();
+    /**
+     * todo: Longer term we should pass this from client somehow - this would be an optimization;  once
+     * that is in place make sure to build and test "writeSet" below using OperationType not LockType
+     * With Static Partitions we assume that the query modifies exactly the partitions it locked.  (not entirely
+     * realistic since Update/Delete may have some predicate that filters out all records out of
+     * some partition(s), but plausible).  For DP, we acquire locks very wide (all known partitions),
+     * but for most queries only a fraction will actually be updated.  #addDynamicPartitions() tells
+     * us exactly which ones were written to.  Thus using this trick to kill a query early for
+     * DP queries may be too restrictive.
+     */
+    boolean isPartOfDynamicPartitionInsert = true;
+    List<LockInfo> locksBeingChecked = getLocksFromLockId(jdbcResource, extLockId); //being acquired now
+    response.setLockid(extLockId);
+
+    //This is the set of entities that the statement represented by extLockId wants to update
+    List<LockInfo> writeSet = new ArrayList<>();
+
+    for (LockInfo info : locksBeingChecked) {
+      if (!isPartOfDynamicPartitionInsert && info.getType() == LockType.SHARED_WRITE) {
+        writeSet.add(info);
+      }
+    }
+    if (!writeSet.isEmpty()) {
+      if (writeSet.get(0).getTxnId() == 0) {
+        //Write operation always start a txn
+        throw new IllegalStateException("Found Write lock for " + JavaUtils.lockIdToString(extLockId) + " but no txnid");
+      }
+
+
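+      // Parameter layout: args[0] is the writer's txn id (used as the WS_COMMIT_ID lower bound);
+      // each lock in the write set then contributes its db, table, and the partition twice
+      // (once for the equality check and once for the IS NULL check in the generated predicate).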
+      Object[] args = new Object[writeSet.size() * 4 + 1];
+      int index = 0;
+      args[index++] = writeSet.get(0).getTxnId();
+      StringBuilder sb = new StringBuilder(" \"WS_DATABASE\", \"WS_TABLE\", \"WS_PARTITION\", " +
+          "\"WS_TXNID\", \"WS_COMMIT_ID\" " +
+          "FROM \"WRITE_SET\" WHERE WS_COMMIT_ID >= ? AND (");//see commitTxn() for more info on this inequality
+      for (int i = 0; i < writeSet.size(); i++) {
+        sb.append("(\"WS_DATABASE\" = ? AND \"WS_TABLE\" = ? AND (\"WS_PARTITION\" = ? OR ? IS NULL)");
+        if (i < writeSet.size() - 1) {
+          sb.append(" OR ");
+        }
+        sb.append(")");
+        LockInfo info = writeSet.get(i);
+        args[index++] = info.getDb();
+        args[index++] = info.getTable();
+        args[index++] = info.getPartition();
+        args[index++] = info.getPartition();
+      }
+
+      WriteSetInfo wsInfo = jdbcResource.getJdbcTemplate().getJdbcTemplate().query(sb.toString(), args, (ResultSet rs) -> {
+        WriteSetInfo info = null;
+        if (rs.next()) {
+          info = new WriteSetInfo();
+          info.database = rs.getString("WS_DATABASE");
+          info.table = rs.getString("WS_TABLE");
+          info.partition = rs.getString("WS_PARTITION");
+          info.txnId = rs.getLong("WS_TXNID");
+          info.commitId = rs.getLong("WS_COMMIT_ID");
+        }
+        return info;
+      });
+
+      if (wsInfo != null) {
+        /**
+         * if here, it means we found an already committed txn which overlaps with the current one and
+         * it updated the same resource the current txn wants to update.  By First-committer-wins
+         * rule, current txn will not be allowed to commit so  may as well kill it now;  This is just an
+         * optimization to prevent wasting cluster resources to run a query which is known to be DOA.
+         * {@link #commitTxn(CommitTxnRequest)} has the primary responsibility to ensure this.
+         * checkLock() runs at READ_COMMITTED, so you could have another (Hive) txn running commitTxn()
+         * in parallel and thus writing to WRITE_SET.  commitTxn() logic is properly mutexed to ensure
+         * that we don't "miss" any WW conflicts. We could've mutexed the checkLock() and commitTxn()
+         * as well but this reduces concurrency for very little gain.
+         * Note that update/delete (which runs as dynamic partition insert) acquires a lock on the table,
+         * but WRITE_SET has entries for actual partitions updated. Thus this optimization will "miss"
+         * the WW conflict, but it will be caught in commitTxn() where actual partitions written are known.
+         * This is OK since we want 2 concurrent updates that update different sets of partitions to both commit.
+         */
+        String resourceName = wsInfo.database + '/' + wsInfo.table;
+        if (wsInfo.partition != null) {
+          resourceName += '/' + wsInfo.partition;
+        }
+
+        String msg = "Aborting " + JavaUtils.txnIdToString(writeSet.get(0).getTxnId()) +
+            " since a concurrent committed transaction [" + JavaUtils.txnIdToString(wsInfo.txnId) + "," + wsInfo.commitId +
+            "] has already updated resource '" + resourceName + "'";
+        LOG.info(msg);
+        int count = new AbortTxnsFunction(Collections.singletonList(writeSet.get(0).getTxnId()),
+            false, false, false, TxnErrorMsg.ABORT_CONCURRENT).execute(jdbcResource);
+        if (count != 1) {
+          throw new IllegalStateException(msg + " FAILED!");
+        }
+        throw new TxnAbortedException(msg);
+      }
+    }
+
+    String queryStr =
+        " \"EX\".*, \"REQ\".\"HL_LOCK_INT_ID\" \"LOCK_INT_ID\", \"REQ\".\"HL_LOCK_TYPE\" \"LOCK_TYPE\" FROM (" +
+            " SELECT \"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\", \"HL_TXNID\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\"," +
+            " \"HL_LOCK_STATE\", \"HL_LOCK_TYPE\" FROM \"HIVE_LOCKS\"" +
+            " WHERE \"HL_LOCK_EXT_ID\" < " + extLockId + ") \"EX\"" +
+            " INNER JOIN (" +
+            " SELECT \"HL_LOCK_INT_ID\", \"HL_TXNID\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\"," +
+            " \"HL_LOCK_TYPE\" FROM \"HIVE_LOCKS\"" +
+            " WHERE \"HL_LOCK_EXT_ID\" = " + extLockId + ") \"REQ\"" +
+            " ON \"EX\".\"HL_DB\" = \"REQ\".\"HL_DB\"" +
+            " AND (\"EX\".\"HL_TABLE\" IS NULL OR \"REQ\".\"HL_TABLE\" IS NULL" +
+            " OR \"EX\".\"HL_TABLE\" = \"REQ\".\"HL_TABLE\"" +
+            " AND (\"EX\".\"HL_PARTITION\" IS NULL OR \"REQ\".\"HL_PARTITION\" IS NULL" +
+            " OR \"EX\".\"HL_PARTITION\" = \"REQ\".\"HL_PARTITION\"))" +
+        /*different locks from same txn should not conflict with each other,
+          txnId=0 means it's a select or IUD which does not write to ACID table*/
+            " WHERE (\"REQ\".\"HL_TXNID\" = 0 OR \"EX\".\"HL_TXNID\" != \"REQ\".\"HL_TXNID\")" +
+            " AND ";
+
+      /**EXCLUSIVE lock on partition should prevent SHARED_READ on the table, however there is no reason
+        for an EXCLUSIVE on a table to prevent SHARED_READ on a database. Similarly, EXCLUSIVE on a partition
+        should not conflict with SHARED_READ on a database.
+        SHARED_READ is usually acquired on a database to make sure it's not dropped, while some operation
+        is performed on that db (e.g. show tables, created table, etc).
+        EXCLUSIVE on an object may mean it's being dropped or overwritten.*/
+    String[] whereStr = {
+        // shared-read
+        " \"REQ\".\"HL_LOCK_TYPE\"=" + LockTypeUtil.sharedRead() + " AND \"EX\".\"HL_LOCK_TYPE\"=" +
+            LockTypeUtil.exclusive() + " AND NOT (\"EX\".\"HL_TABLE\" IS NOT NULL AND \"REQ\".\"HL_TABLE\" IS NULL)",
+        // exclusive
+        " \"REQ\".\"HL_LOCK_TYPE\"=" + LockTypeUtil.exclusive() +
+            " AND NOT (\"EX\".\"HL_TABLE\" IS NULL AND \"EX\".\"HL_LOCK_TYPE\"=" +
+            LockTypeUtil.sharedRead() + " AND \"REQ\".\"HL_TABLE\" IS NOT NULL)",
+        // shared-write
+        " \"REQ\".\"HL_LOCK_TYPE\"=" + LockTypeUtil.sharedWrite() + " AND \"EX\".\"HL_LOCK_TYPE\" IN (" +
+            LockTypeUtil.exclWrite() + "," + LockTypeUtil.exclusive() + ")",
+        // excl-write
+        " \"REQ\".\"HL_LOCK_TYPE\"=" + LockTypeUtil.exclWrite() + " AND \"EX\".\"HL_LOCK_TYPE\"!=" +
+            LockTypeUtil.sharedRead()
+    };
+
+    List<String> subQuery = new ArrayList<>();
+    for (String subCond : whereStr) {
+      subQuery.add("(" + jdbcResource.getSqlGenerator().addLimitClause(1, queryStr + subCond) + ")");
+    }
+    String query = String.join(" UNION ALL ", subQuery);
+
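+    // Each subquery is limited to one row, so the UNION ALL returns at most one candidate blocker
+    // per conflict rule; the first row found (if any) is treated as the blocking lock.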
+    Boolean success = Objects.requireNonNull(jdbcResource.getJdbcTemplate().query(query, new MapSqlParameterSource(), (ResultSet rs) -> {
+      if (rs.next()) {
+        try {
+          // We acquire all locks for a given query atomically; if 1 blocks, all remain in Waiting state.
+          LockInfo blockedBy = new LockInfo(rs);
+          long intLockId = rs.getLong("LOCK_INT_ID");
+          char lockChar = rs.getString("LOCK_TYPE").charAt(0);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug("Failure to acquire lock({} intLockId:{} {}), blocked by ({})", JavaUtils.lockIdToString(extLockId),
+                intLockId, JavaUtils.txnIdToString(txnId), blockedBy);
+          }
+
+          LockType lockType = LockTypeUtil.getLockTypeFromEncoding(lockChar)
+              .orElseThrow(() -> new MetaException("Unknown lock type: " + lockChar));
+
+          if ((zeroWaitReadEnabled && LockType.SHARED_READ == lockType || isExclusiveCTAS) && TxnUtils.isValidTxn(txnId)) {
+            jdbcResource.getJdbcTemplate().update("DELETE FROM \"HIVE_LOCKS\" WHERE \"HL_LOCK_EXT_ID\" = :extLockId",
+                new MapSqlParameterSource().addValue("extLockId", extLockId));
+
+            response.setErrorMessage(String.format(
+                isExclusiveCTAS ? EXCL_CTAS_ERR_MSG : ZERO_WAIT_READ_ERR_MSG, blockedBy));
+            response.setState(LockState.NOT_ACQUIRED);
+            return false;
+          }
+
+          int updCnt = jdbcResource.getJdbcTemplate().update("UPDATE \"HIVE_LOCKS\"" +
+                  " SET \"HL_BLOCKEDBY_EXT_ID\" = :blockedByExtLockId, \"HL_BLOCKEDBY_INT_ID\" = :blockedByIntLockId " +
+                  " WHERE \"HL_LOCK_EXT_ID\" = :extLockId AND \"HL_LOCK_INT_ID\" = :intLockId",
+              new MapSqlParameterSource()
+                  .addValue("blockedByExtLockId", blockedBy.getExtLockId())
+                  .addValue("blockedByIntLockId", blockedBy.getIntLockId())
+                  .addValue("extLockId", extLockId)
+                  .addValue("intLockId", intLockId));
+
+          if (updCnt != 1) {
+            LOG.error("Failure to update lock (extLockId={}, intLockId={}) with the blocking lock's IDs " +
+                "(extLockId={}, intLockId={})", extLockId, intLockId, blockedBy.getExtLockId(), blockedBy.getIntLockId());
+            throw new RuntimeException("This should never happen: " + JavaUtils.txnIdToString(txnId) + " "
+                + JavaUtils.lockIdToString(extLockId) + " " + intLockId);
+          }
+          response.setState(LockState.WAITING);
+          return false;
+        } catch (MetaException e) {
+          throw new MetaWrapperException(e);
+        }
+      }
+      return true;
+    }), "This never should be null, it's just to suppress warnings");
+
+    if (!success) {
+      return response;
+    }
+
+    // If here, there were no locks that would block any item from 'locksBeingChecked' - acquire them all
+    acquire(jdbcResource, locksBeingChecked);
+
+    // We acquired all the locks, so commit and return acquired.
+    LOG.debug("Successfully acquired locks: {}", locksBeingChecked);
+    response.setState(LockState.ACQUIRED);
+    return response;
+  }
+
+  // NEVER call this function without first calling heartbeat(long, long)
+  private List<LockInfo> getLocksFromLockId(MultiDataSourceJdbcResource jdbcResource, long extLockId) throws MetaException {
+    List locks = jdbcResource.execute(new GetLocksByLockId(extLockId, -1, jdbcResource.getSqlGenerator()));
+    if (locks.isEmpty()) {
+      throw new MetaException("This should never happen!  We already " +
+          "checked the lock(" + JavaUtils.lockIdToString(extLockId) + ") existed but now we can't find it!");
+    }
+    LOG.debug("Found {} locks for extLockId={}. Locks: {}", locks.size(), extLockId, locks);
+    return locks;
+  }
+
+  private void acquire(MultiDataSourceJdbcResource jdbcResource, List<LockInfo> locksBeingChecked)
+      throws NoSuchLockException, MetaException {
+    if (CollectionUtils.isEmpty(locksBeingChecked)) {
+      return;
+    }
+    long txnId = locksBeingChecked.get(0).getTxnId();
+    long extLockId = locksBeingChecked.get(0).getExtLockId();
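+    // A single UPDATE moves every lock row belonging to this external lock id to the acquired
+    // state; if fewer rows are updated than expected, the missing internal ids are collected below.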
+    int rc = jdbcResource.getJdbcTemplate().update("UPDATE \"HIVE_LOCKS\" SET \"HL_LOCK_STATE\" = :state, " +
+        //if lock is part of txn, heartbeat info is in txn record
+        "\"HL_LAST_HEARTBEAT\" = " + (TxnUtils.isValidTxn(txnId) ? 0 : getEpochFn(jdbcResource.getDatabaseProduct())) +
+        ",\"HL_ACQUIRED_AT\" = " + getEpochFn(jdbcResource.getDatabaseProduct()) +
+        ",\"HL_BLOCKEDBY_EXT_ID\"=NULL,\"HL_BLOCKEDBY_INT_ID\"=NULL" +
+        " WHERE \"HL_LOCK_EXT_ID\" = :extLockId", 
+        new MapSqlParameterSource()
+            .addValue("state", Character.toString(LOCK_ACQUIRED), Types.CHAR)
+            .addValue("extLockId", extLockId));
+    
+    if (rc < locksBeingChecked.size()) {
+      LOG.error("Failure to acquire all locks (acquired: {}, total needed: {}).", rc, locksBeingChecked.size());
+      /*select all locks for this ext ID and see which ones are missing*/
+      Set<String> notFoundIds = locksBeingChecked.stream()
+          .map(lockInfo -> Long.toString(lockInfo.getIntLockId()))
+          .collect(Collectors.toSet());
+      List<String> foundIds = Objects.requireNonNull(jdbcResource.getJdbcTemplate().query(
+          "SELECT \"HL_LOCK_INT_ID\" FROM \"HIVE_LOCKS\" WHERE \"HL_LOCK_EXT_ID\" = :extLockId",
+          new MapSqlParameterSource().addValue("extLockId", extLockId), rs -> {
+            List<String> ids = new ArrayList<>();
+            while (rs.next()) {
+              ids.add(rs.getString("HL_LOCK_INT_ID"));
+            }
+            return ids;
+          }), "This never should be null, it's just to suppress warnings");
+      
+      foundIds.forEach(notFoundIds::remove);
+      String errorMsg = String.format("No such lock(s): (%s: %s) %s",
+          JavaUtils.lockIdToString(extLockId), String.join(", ", notFoundIds), JavaUtils.txnIdToString(txnId));
+      throw new NoSuchLockException(errorMsg);
+    }
+  }
+
+  static class WriteSetInfo {
+    String database;
+    String table;
+    String partition;
+    Long txnId;
+    Long commitId;
+  }
+
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/CleanTxnToWriteIdTableFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CleanTxnToWriteIdTableFunction.java
similarity index 90%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/CleanTxnToWriteIdTableFunction.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CleanTxnToWriteIdTableFunction.java
index 3268f0d2ef18..6457cd27f04a 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/CleanTxnToWriteIdTableFunction.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CleanTxnToWriteIdTableFunction.java
@@ -15,10 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
 
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.txn.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.TxnHandler;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
 import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
 import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
 import org.slf4j.Logger;
@@ -47,21 +48,19 @@ public class CleanTxnToWriteIdTableFunction implements TransactionalFunction {
+
+  private static final Logger LOG = LoggerFactory.getLogger(CleanupRecordsFunction.class);
+  private static final EnumSet<HiveObjectType> HIVE_OBJECT_TYPES =
+      EnumSet.of(HiveObjectType.DATABASE, HiveObjectType.TABLE, HiveObjectType.PARTITION);
+
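+  // Each entry pairs a predicate (deciding whether the delete applies for the given object type
+  // and the keepTxnToWriteIdMetaData flag) with a parameterized DELETE statement; the statements
+  // are executed in insertion order for every affected object.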
+  @SuppressWarnings("squid:S3599")
+  //language=SQL
+  private static final Map<BiFunction<HiveObjectType, Boolean, Boolean>, String> DELETE_COMMANDS =
+      new LinkedHashMap<BiFunction<HiveObjectType, Boolean, Boolean>, String>() {{
+        put((hiveObjectType, keepTxnToWriteIdMetaData) -> HIVE_OBJECT_TYPES.contains(hiveObjectType),
+            "DELETE FROM \"TXN_COMPONENTS\" WHERE " +
+                "\"TC_DATABASE\" = :dbName AND " +
+                "(\"TC_TABLE\" = :tableName OR :tableName IS NULL) AND " +
+                "(\"TC_PARTITION\" = :partName OR :partName IS NULL)");
+        put((hiveObjectType, keepTxnToWriteIdMetaData) -> HIVE_OBJECT_TYPES.contains(hiveObjectType),
+            "DELETE FROM \"COMPLETED_TXN_COMPONENTS\" WHERE " +
+                "(\"CTC_DATABASE\" = :dbName) AND " +
+                "(\"CTC_TABLE\" = :tableName OR :tableName IS NULL) AND " +
+                "(\"CTC_PARTITION\" = :partName OR :partName IS NULL)");
+        put((hiveObjectType, keepTxnToWriteIdMetaData) -> HIVE_OBJECT_TYPES.contains(hiveObjectType),
+            "DELETE FROM \"COMPACTION_QUEUE\" WHERE " +
+                "\"CQ_DATABASE\" = :dbName AND " +
+                "(\"CQ_TABLE\" = :tableName OR :tableName IS NULL) AND " +
+                "(\"CQ_PARTITION\" = :partName OR :partName IS NULL) AND " +
+                "(\"CQ_TXN_ID\" != :txnId OR :txnId IS NULL)");
+        put((hiveObjectType, keepTxnToWriteIdMetaData) -> HIVE_OBJECT_TYPES.contains(hiveObjectType),
+            "DELETE FROM \"COMPLETED_COMPACTIONS\" WHERE " +
+                "\"CC_DATABASE\" = :dbName AND " +
+                "(\"CC_TABLE\" = :tableName OR :tableName IS NULL) AND " +
+                "(\"CC_PARTITION\" = :partName OR :partName IS NULL)");
+        put((hiveObjectType, keepTxnToWriteIdMetaData) -> HiveObjectType.DATABASE.equals(hiveObjectType) ||
+                (HiveObjectType.TABLE.equals(hiveObjectType) && !keepTxnToWriteIdMetaData),
+            "DELETE FROM \"TXN_TO_WRITE_ID\" WHERE " +
+                "\"T2W_DATABASE\" = :dbName AND " +
+                "(\"T2W_TABLE\" = :tableName OR :tableName IS NULL)");
+        put((hiveObjectType, keepTxnToWriteIdMetaData) -> HiveObjectType.DATABASE.equals(hiveObjectType) ||
+                HiveObjectType.TABLE.equals(hiveObjectType) && !keepTxnToWriteIdMetaData,
+            "DELETE FROM \"NEXT_WRITE_ID\" WHERE " +
+                "\"NWI_DATABASE\" = :dbName AND " +
+                "(\"NWI_TABLE\" = :tableName OR :tableName IS NULL)");
+        put((hiveObjectType, keepTxnToWriteIdMetaData) -> HIVE_OBJECT_TYPES.contains(hiveObjectType),
+            "DELETE FROM \"COMPACTION_METRICS_CACHE\" WHERE " +
+                "\"CMC_DATABASE\" = :dbName AND " +
+                "(\"CMC_TABLE\" = :tableName OR :tableName IS NULL) AND " +
+                "(\"CMC_PARTITION\" = :partName OR :partName IS NULL)");
+      }};
+
+  private final HiveObjectType type;
+  private final Database db;
+  private final Table table;
+  private final Iterator<Partition> partitionIterator;
+  private final String defaultCatalog;
+  private final boolean keepTxnToWriteIdMetaData;
+  private final Long txnId;
+
+  public CleanupRecordsFunction(HiveObjectType type, Database db, Table table, Iterator<Partition> partitionIterator,
+                                String defaultCatalog, boolean keepTxnToWriteIdMetaData, Long txnId) {
+    this.type = type;
+    this.db = db;
+    this.table = table;
+    this.partitionIterator = partitionIterator;
+    this.defaultCatalog = defaultCatalog;
+    this.keepTxnToWriteIdMetaData = keepTxnToWriteIdMetaData;
+    this.txnId = txnId;
+  }
+
+  @Override
+  public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    // cleanup should be done only for objects belonging to default catalog
+    List<MapSqlParameterSource> paramSources = new ArrayList<>();
+
+    switch (type) {
+      case DATABASE: {
+        if (!defaultCatalog.equals(db.getCatalogName())) {
+          LOG.debug("Skipping cleanup because db: " + db.getName() + " belongs to catalog "
+              + "other than default catalog: " + db.getCatalogName());
+          return null;
+        }
+        paramSources.add(new MapSqlParameterSource()
+            .addValue("dbName", db.getName().toLowerCase())
+            .addValue("tableName", null, Types.VARCHAR)
+            .addValue("partName", null, Types.VARCHAR)
+            .addValue("txnId", txnId, Types.BIGINT));
+        break;
+      }
+      case TABLE: {
+        if (!defaultCatalog.equals(table.getCatName())) {
+          LOG.debug("Skipping cleanup because table: {} belongs to catalog other than default catalog: {}",
+              table.getTableName(), table.getCatName());
+          return null;
+        }
+        paramSources.add(new MapSqlParameterSource()
+            .addValue("dbName", table.getDbName().toLowerCase())
+            .addValue("tableName", table.getTableName().toLowerCase(), Types.VARCHAR)
+            .addValue("partName", null, Types.VARCHAR)
+            .addValue("txnId", null, Types.BIGINT));
+        break;
+      }
+      case PARTITION: {
+        if (!defaultCatalog.equals(table.getCatName())) {
+          LOG.debug("Skipping cleanup because partitions belong to catalog other than default catalog: {}",
+              table.getCatName());
+          return null;
+        }
+        List<FieldSchema> partCols = table.getPartitionKeys();  // partition columns
+        List<String> partVals;                                  // partition values
+        while (partitionIterator.hasNext()) {
+          Partition partition = partitionIterator.next();
+          partVals = partition.getValues();
+          paramSources.add(new MapSqlParameterSource()
+              .addValue("dbName", table.getDbName().toLowerCase())
+              .addValue("tableName", table.getTableName().toLowerCase(), Types.VARCHAR)
+              .addValue("partName", Warehouse.makePartName(partCols, partVals), Types.VARCHAR)
+              .addValue("txnId", null, Types.BIGINT));
+        }
+      }
+    }
+
+    try {
+      for (MapSqlParameterSource parameterSource : paramSources) {
+        for (Map.Entry<BiFunction<HiveObjectType, Boolean, Boolean>, String> item : DELETE_COMMANDS.entrySet()) {
+          if (item.getKey().apply(type, keepTxnToWriteIdMetaData)) {
+            jdbcResource.getJdbcTemplate().update(item.getValue(), parameterSource);
+          }
+        }
+      }
+    } catch (DataAccessException e) {
+      Throwable ex = e;
+      do {
+        String message = ex.getMessage();
+        if (StringUtils.isNotBlank(message) && message.contains("does not exist")) {
+          LOG.warn("Cannot perform cleanup since metastore table does not exist");
+          return null;
+        }
+        ex = ex.getCause();
+      } while (ex != null);
+      throw e;
+    }
+    return null;
+  }
+
+}
\ No newline at end of file
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CommitTxnFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CommitTxnFunction.java
new file mode 100644
index 000000000000..6b5b81aace90
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CommitTxnFunction.java
@@ -0,0 +1,636 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.hadoop.hive.common.repl.ReplConst;
+import org.apache.hadoop.hive.metastore.MetaStoreListenerNotifier;
+import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener;
+import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
+import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.ReplLastIdInfo;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.api.TxnType;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.events.CommitCompactionEvent;
+import org.apache.hadoop.hive.metastore.events.CommitTxnEvent;
+import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.entities.OperationType;
+import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
+import org.apache.hadoop.hive.metastore.txn.TxnHandler;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.commands.DeleteReplTxnMapEntryCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.commands.InsertCompletedTxnComponentsCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.commands.RemoveTxnsFromMinHistoryLevelCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.commands.RemoveWriteIdsFromMinHistoryCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.FindTxnStateHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.GetCompactionInfoHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.GetHighWaterMarkHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.GetOpenTxnTypeAndLockHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.TargetTxnIdListHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.RollbackException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionContext;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
+import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.Statement;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static org.apache.hadoop.hive.metastore.txn.TxnUtils.getEpochFn;
+import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
+
+public class CommitTxnFunction implements TransactionalFunction<TxnType> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(CommitTxnFunction.class);
+
+  private final CommitTxnRequest rqst;
+  private final List<TransactionalMetaStoreEventListener> transactionalListeners;
+
+  public CommitTxnFunction(CommitTxnRequest rqst, List<TransactionalMetaStoreEventListener> transactionalListeners) {
+    this.rqst = rqst;
+    this.transactionalListeners = transactionalListeners;
+  }
+
+  @Override
+  public TxnType execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException, NoSuchTxnException, TxnAbortedException {
+    char isUpdateDelete = 'N';
+    long txnid = rqst.getTxnid();
+    long sourceTxnId = -1;
+
+    boolean isReplayedReplTxn = TxnType.REPL_CREATED.equals(rqst.getTxn_type());
+    boolean isHiveReplTxn = rqst.isSetReplPolicy() && TxnType.DEFAULT.equals(rqst.getTxn_type());
+    // Get the current TXN
+    TransactionContext context = jdbcResource.getTransactionManager().getActiveTransaction();
+    Long commitId = null;
+
+    if (rqst.isSetReplLastIdInfo()) {
+      updateReplId(jdbcResource, rqst.getReplLastIdInfo());
+    }
+
+    if (isReplayedReplTxn) {
+      assert (rqst.isSetReplPolicy());
+      sourceTxnId = rqst.getTxnid();
+      List<Long> targetTxnIds = jdbcResource.execute(new TargetTxnIdListHandler(rqst.getReplPolicy(), Collections.singletonList(sourceTxnId)));
+      if (targetTxnIds.isEmpty()) {
+        // Idempotent case where txn was already closed or commit txn event received without
+        // corresponding open txn event.
+        LOG.info("Target txn id is missing for source txn id : {} and repl policy {}", sourceTxnId,
+            rqst.getReplPolicy());
+        throw new RollbackException(null);
+      }
+      assert targetTxnIds.size() == 1;
+      txnid = targetTxnIds.get(0);
+    }
+
+    /**
+     * Runs at READ_COMMITTED with S4U on the TXNS row for "txnid".  S4U ensures that no other
+     * operation can change this txn (such as acquiring locks). lock() and commitTxn()
+     * should not normally run concurrently (for the same txn), but could due to bugs in the client,
+     * which could then corrupt internal transaction manager state.  Also competes with abortTxn().
+     */
+    TxnType txnType = jdbcResource.execute(new GetOpenTxnTypeAndLockHandler(jdbcResource.getSqlGenerator(), txnid));
+    if (txnType == null) {
+      //if here, txn was not found (in expected state)
+      TxnStatus actualTxnStatus = jdbcResource.execute(new FindTxnStateHandler(txnid));
+      if (actualTxnStatus == TxnStatus.COMMITTED) {
+        if (isReplayedReplTxn) {
+          // in case of replication, idempotent is taken care by getTargetTxnId
+          LOG.warn("Invalid state COMMITTED for transactions started using replication replay task");
+        }
+        /**
+         * This makes the operation idempotent
+         * (assume that this is most likely due to retry logic)
+         */
+        LOG.info("Nth commitTxn({}) msg", JavaUtils.txnIdToString(txnid));
+        return null;
+      }
+      TxnUtils.raiseTxnUnexpectedState(actualTxnStatus, txnid);
+    }
+
+    String conflictSQLSuffix = "FROM \"TXN_COMPONENTS\" WHERE \"TC_TXNID\"=" + txnid + " AND \"TC_OPERATION_TYPE\" IN (" +
+        OperationType.UPDATE + "," + OperationType.DELETE + ")";
+    long tempCommitId = TxnUtils.generateTemporaryId();
+
+    if (txnType == TxnType.SOFT_DELETE || txnType == TxnType.COMPACTION) {
+      new AcquireTxnLockFunction(false).execute(jdbcResource);
+      commitId = jdbcResource.execute(new GetHighWaterMarkHandler());
+
+    } else if (txnType != TxnType.READ_ONLY && !isReplayedReplTxn) {
+      String writeSetInsertSql = "INSERT INTO \"WRITE_SET\" (\"WS_DATABASE\", \"WS_TABLE\", \"WS_PARTITION\"," +
+          "   \"WS_TXNID\", \"WS_COMMIT_ID\", \"WS_OPERATION_TYPE\")" +
+          " SELECT DISTINCT \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", \"TC_TXNID\", " + tempCommitId + ", \"TC_OPERATION_TYPE\" ";
+
+      boolean isUpdateOrDelete = Boolean.TRUE.equals(jdbcResource.getJdbcTemplate().query(
+          jdbcResource.getSqlGenerator().addLimitClause(1, "\"TC_OPERATION_TYPE\" " + conflictSQLSuffix),
+          ResultSet::next));
+      
+      if (isUpdateOrDelete) {
+        isUpdateDelete = 'Y';
+        //if here it means currently committing txn performed update/delete and we should check WW conflict
+        /**
+         * "select distinct" is used below because
+         * 1. once we get to multi-statement txns, we only care to record that something was updated once
+         * 2. if {@link #addDynamicPartitions(AddDynamicPartitions)} is retried by caller it may create
+         *  duplicate entries in TXN_COMPONENTS
+         * but we want to add a PK on WRITE_SET which won't have unique rows w/o this distinct
+         * even if it includes all of its columns
+         *
+         * First insert into write_set using a temporary commitID, which will be updated in a separate call,
+         * see: {@link #updateWSCommitIdAndCleanUpMetadata(Statement, long, TxnType, Long, long)}}.
+         * This should decrease the scope of the S4U lock on the next_txn_id table.
+         */
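+        // Illustrative shape of the statement built below when useMinHistoryLevel() is true (values hypothetical):
+        //   INSERT INTO "WRITE_SET" (...)
+        //     SELECT DISTINCT "TC_DATABASE", "TC_TABLE", "TC_PARTITION", "TC_TXNID", <tempCommitId>, "TC_OPERATION_TYPE"
+        //     FROM "TXN_COMPONENTS" WHERE "TC_TXNID" = <txnid> AND "TC_OPERATION_TYPE" IN (<update>, <delete>)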
+        Object undoWriteSetForCurrentTxn = context.createSavepoint();
+        jdbcResource.getJdbcTemplate().update(
+            writeSetInsertSql + (TxnHandler.ConfVars.useMinHistoryLevel() ? conflictSQLSuffix :
+            "FROM \"TXN_COMPONENTS\" WHERE \"TC_TXNID\"= :txnId AND \"TC_OPERATION_TYPE\" <> :type"),
+            new MapSqlParameterSource()
+                .addValue("txnId", txnid)
+                .addValue("type", OperationType.COMPACT.getSqlConst()));
+
+        /**
+         * This S4U will mutex with other commitTxn() and openTxns().
+         * -1 below makes txn intervals look like [3,3] [4,4] if all txns are serial
+         * Note: it's possible to have several txns have the same commit id.  Suppose 3 txns start
+         * at the same time and no new txns start until all 3 commit.
+         * We could've incremented the sequence for commitId as well but it doesn't add anything functionally.
+         */
+        new AcquireTxnLockFunction(false).execute(jdbcResource);
+        commitId = jdbcResource.execute(new GetHighWaterMarkHandler());
+
+        if (!rqst.isExclWriteEnabled()) {
+          /**
+           * see if there are any overlapping txns that wrote the same element, i.e. have a conflict
+           * Since entire commit operation is mutexed wrt other start/commit ops,
+           * committed.ws_commit_id <= current.ws_commit_id for all txns
+           * thus if committed.ws_commit_id < current.ws_txnid, transactions do NOT overlap
+           * For example, [17,20] is committed, [6,80] is being committed right now - these overlap
+           * [17,20] committed and [21,21] committing now - these do not overlap.
+           * [17,18] committed and [18,19] committing now - these overlap  (here 18 started while 17 was still running)
+           */
+          WriteSetInfo info = checkForWriteConflict(jdbcResource, txnid);
+          if (info != null) {
+            //found a conflict, so let's abort the txn
+            String committedTxn = "[" + JavaUtils.txnIdToString(info.txnId) + "," + info.committedCommitId + "]";
+            StringBuilder resource = new StringBuilder(info.database).append("/").append(info.table);
+            if (info.partition != null) {
+              resource.append('/').append(info.partition);
+            }
+            String msg = "Aborting [" + JavaUtils.txnIdToString(txnid) + "," + commitId + "]" + " due to a write conflict on " + resource +
+                " committed by " + committedTxn + " " + info.currentOperationType + "/" + info.committedOperationType;
+            //remove WRITE_SET info for current txn since it's about to abort
+            context.rollbackToSavepoint(undoWriteSetForCurrentTxn);
+            LOG.info(msg);
+            //todo: should make abortTxns() write something into TXNS.TXN_META_INFO about this
+            if (new AbortTxnsFunction(Collections.singletonList(txnid), false, false, 
+                isReplayedReplTxn, TxnErrorMsg.ABORT_WRITE_CONFLICT).execute(jdbcResource) != 1) {
+              throw new IllegalStateException(msg + " FAILED!");
+            }
+            throw new TxnAbortedException(msg);
+          }
+        }
+      } else if (!TxnHandler.ConfVars.useMinHistoryLevel()) {
+        jdbcResource.getJdbcTemplate().update(writeSetInsertSql + "FROM \"TXN_COMPONENTS\" WHERE \"TC_TXNID\" = :txnId AND \"TC_OPERATION_TYPE\" <> :type",
+            new MapSqlParameterSource()
+                .addValue("txnId", txnid)
+                .addValue("type", OperationType.COMPACT.getSqlConst()));
+        commitId = jdbcResource.execute(new GetHighWaterMarkHandler());
+      }
+    } else {
+      /*
+       * current txn didn't update/delete anything (may have inserted), so just proceed with commit
+       *
+       * We only care about commit id for write txns, so for RO (when supported) txns we don't
+       * have to mutex on NEXT_TXN_ID.
+       * Consider: if RO txn is after a W txn, then RO's openTxns() will be mutexed with W's
+       * commitTxn() because both do S4U on NEXT_TXN_ID and thus RO will see result of W txn.
+       * If RO < W, then there is no reads-from relationship.
+       * In replication flow we don't expect any write write conflict as it should have been handled at source.
+       */
+      assert true;
+    }
+
+
+    if (txnType != TxnType.READ_ONLY && !isReplayedReplTxn && !MetaStoreServerUtils.isCompactionTxn(txnType)) {
+      moveTxnComponentsToCompleted(jdbcResource, txnid, isUpdateDelete);
+    } else if (isReplayedReplTxn) {
+      if (rqst.isSetWriteEventInfos() && !rqst.getWriteEventInfos().isEmpty()) {
+        jdbcResource.execute(new InsertCompletedTxnComponentsCommand(txnid, isUpdateDelete, rqst.getWriteEventInfos()));
+      }
+      jdbcResource.execute(new DeleteReplTxnMapEntryCommand(sourceTxnId, rqst.getReplPolicy()));
+    }
+    updateWSCommitIdAndCleanUpMetadata(jdbcResource, txnid, txnType, commitId, tempCommitId);
+    jdbcResource.execute(new RemoveTxnsFromMinHistoryLevelCommand(ImmutableList.of(txnid)));
+    jdbcResource.execute(new RemoveWriteIdsFromMinHistoryCommand(ImmutableList.of(txnid)));
+    if (rqst.isSetKeyValue()) {
+      updateKeyValueAssociatedWithTxn(jdbcResource, rqst);
+    }
+
+    if (!isHiveReplTxn) {
+      createCommitNotificationEvent(jdbcResource, txnid, txnType);
+    }
+
+    LOG.debug("Going to commit");
+
+    if (MetastoreConf.getBoolVar(jdbcResource.getConf(), MetastoreConf.ConfVars.METASTORE_ACIDMETRICS_EXT_ON)) {
+      Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_COMMITTED_TXNS).inc();
+    }
+    return txnType;
+  }
+
+  private void updateReplId(MultiDataSourceJdbcResource jdbcResource, ReplLastIdInfo replLastIdInfo) throws MetaException {
+    String lastReplId = Long.toString(replLastIdInfo.getLastReplId());
+    String catalog = replLastIdInfo.isSetCatalog() ? normalizeIdentifier(replLastIdInfo.getCatalog()) :
+        MetaStoreUtils.getDefaultCatalog(jdbcResource.getConf());
+    String db = normalizeIdentifier(replLastIdInfo.getDatabase());
+    String table = replLastIdInfo.isSetTable() ? normalizeIdentifier(replLastIdInfo.getTable()) : null;
+    List<String> partList = replLastIdInfo.isSetPartitionList() ? replLastIdInfo.getPartitionList() : null;
+
+    String s = jdbcResource.getSqlGenerator().getDbProduct().getPrepareTxnStmt();
+    if (s != null) {
+      jdbcResource.getJdbcTemplate().execute(s, ps -> null);
+    }
+
+    // not used select for update as it will be updated by single thread only from repl load
+    long dbId = updateDatabaseProp(jdbcResource, catalog, db, ReplConst.REPL_TARGET_TABLE_PROPERTY, lastReplId);
+    if (table != null) {
+      long tableId = updateTableProp(jdbcResource, catalog, db, dbId, table, ReplConst.REPL_TARGET_TABLE_PROPERTY, lastReplId);
+      if (partList != null && !partList.isEmpty()) {
+        updatePartitionProp(jdbcResource, tableId, partList, ReplConst.REPL_TARGET_TABLE_PROPERTY, lastReplId);
+      }
+    }
+  }
+
+  private long updateDatabaseProp(MultiDataSourceJdbcResource jdbcResource, String catalog, String database, 
+                                  String prop, String propValue) throws MetaException {
+    String query = 
+        "SELECT d.\"DB_ID\", dp.\"PARAM_KEY\", dp.\"PARAM_VALUE\" FROM \"DATABASE_PARAMS\" dp\n" +
+            "RIGHT JOIN \"DBS\" d ON dp.\"DB_ID\" = d.\"DB_ID\" " +
+        "WHERE \"NAME\" = :dbName  and \"CTLG_NAME\" = :catalog";
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Going to execute query <" + query + ">");
+    }
+    DbEntityParam dbEntityParam = jdbcResource.getJdbcTemplate().query(query,
+        new MapSqlParameterSource()
+            .addValue("dbName", database)
+            .addValue("catalog", catalog),
+        //no row means database not found
+        rs -> rs.next()
+            ? new DbEntityParam(rs.getLong("DB_ID"), rs.getString("PARAM_KEY"), rs.getString("PARAM_VALUE"))
+            : null);
+
+    if (dbEntityParam == null) {
+      throw new MetaException("DB with name " + database + " does not exist in catalog " + catalog);
+    }
+
+    //TODO: would be better to replace with MERGE or UPSERT
+    String command;
+    if (dbEntityParam.key == null) {
+      command = "INSERT INTO \"DATABASE_PARAMS\" VALUES (:dbId, :key, :value)";
+    } else if (!dbEntityParam.value.equals(propValue)) {
+      command = "UPDATE \"DATABASE_PARAMS\" SET \"PARAM_VALUE\" = :value WHERE \"DB_ID\" = :dbId AND \"PARAM_KEY\" = :key";
+    } else {
+      LOG.info("Database property: {} with value: {} already updated for db: {}", prop, propValue, database);
+      return dbEntityParam.id;      
+    }
+    
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Updating {} for db: {}  using command {}", prop, database, command);
+    }
+    SqlParameterSource params = new MapSqlParameterSource()
+        .addValue("dbId", dbEntityParam.id)
+        .addValue("key", prop)
+        .addValue("value", propValue);
+    if (jdbcResource.getJdbcTemplate().update(command, params) != 1) {
+      //only one row insert or update should happen
+      throw new RuntimeException("DATABASE_PARAMS is corrupted for database: " + database);
+    }
+    return dbEntityParam.id;
+  }
+
+  private long updateTableProp(MultiDataSourceJdbcResource jdbcResource, String catalog, String db, long dbId,
+                                  String table, String prop, String propValue) throws MetaException {
+    String query = 
+        "SELECT t.\"TBL_ID\", tp.\"PARAM_KEY\", tp.\"PARAM_VALUE\" FROM \"TABLE_PARAMS\" tp " +
+            "RIGHT JOIN \"TBLS\" t ON tp.\"TBL_ID\" = t.\"TBL_ID\" " +
+        "WHERE t.\"DB_ID\" = :dbId AND t.\"TBL_NAME\" = :tableName";
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Going to execute query <" + query + ">");
+    }
+    DbEntityParam dbEntityParam = jdbcResource.getJdbcTemplate().query(query,
+        new MapSqlParameterSource()
+            .addValue("tableName", table)
+            .addValue("dbId", dbId),
+        //no row means table not found
+        rs -> rs.next() 
+            ? new DbEntityParam(rs.getLong("TBL_ID"), rs.getString("PARAM_KEY"), rs.getString("PARAM_VALUE")) 
+            : null);
+
+    if (dbEntityParam == null) {
+      throw new MetaException("Table with name " + table + " does not exist in db " + catalog + "." + db);
+    }
+
+    //TODO: would be better to replace with MERGE or UPSERT
+    String command;
+    if (dbEntityParam.key == null) {
+      command = "INSERT INTO \"TABLE_PARAMS\" VALUES (:tblId, :key, :value)";
+    } else if (!dbEntityParam.value.equals(propValue)) {
+      command = "UPDATE \"TABLE_PARAMS\" SET \"PARAM_VALUE\" = :value WHERE \"TBL_ID\" = :dbId AND \"PARAM_KEY\" = :key";
+    } else {
+      LOG.info("Database property: {} with value: {} already updated for db: {}", prop, propValue, db);
+      return dbEntityParam.id;
+    }
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Updating {} for table: {}  using command {}", prop, table, command);
+    }
+    SqlParameterSource params = new MapSqlParameterSource()
+        .addValue("tblId", dbEntityParam.id)
+        .addValue("key", prop)
+        .addValue("value", propValue);
+    if (jdbcResource.getJdbcTemplate().update(command, params) != 1) {
+      //only one row insert or update should happen
+      throw new RuntimeException("TABLE_PARAMS is corrupted for table: " + table);
+    }
+    return dbEntityParam.id;
+  }
+  
+  private void updatePartitionProp(MultiDataSourceJdbcResource jdbcResource, long tableId,
+                                   List<String> partList, String prop, String propValue) {
+    List<String> queries = new ArrayList<>();
+    StringBuilder prefix = new StringBuilder();
+    StringBuilder suffix = new StringBuilder();
+    //language=SQL
+    prefix.append(
+        "SELECT p.\"PART_ID\", pp.\"PARAM_KEY\", pp.\"PARAM_VALUE\" FROM \"PARTITION_PARAMS\" pp\n" +
+        "RIGHT JOIN \"PARTITIONS\" p ON pp.\"PART_ID\" = p.\"PART_ID\" WHERE p.\"TBL_ID\" = :tblId AND pp.\"PARAM_KEY\" = :key");
+
+    // Populate the complete query with provided prefix and suffix
+    TxnUtils.buildQueryWithINClauseStrings(jdbcResource.getConf(), queries, prefix, suffix, partList,
+        "\"PART_NAME\"", true, false);
+    SqlParameterSource params = new MapSqlParameterSource()
+        .addValue("tblId", tableId)
+        .addValue("key", prop);
+    List<DbEntityParam> partitionParams = new ArrayList<>();
+    for(String query : queries) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Going to execute query <" + query + ">");
+      }
+      jdbcResource.getJdbcTemplate().query(query, params,
+          (ResultSet rs) -> {
+            while (rs.next()) {
+              partitionParams.add(new DbEntityParam(rs.getLong("PART_ID"), rs.getString("PARAM_KEY"), rs.getString("PARAM_VALUE")));
+            }
+          });
+    }
+
+    //TODO: would be better to replace with MERGE or UPSERT
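+    // Emulated upsert (sketch): partitions with no existing row for this key are inserted, rows whose
+    // value differs are updated, and the combined affected-row count is expected to equal partList.size().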
+    int maxBatchSize = MetastoreConf.getIntVar(jdbcResource.getConf(), MetastoreConf.ConfVars.JDBC_MAX_BATCH_SIZE);
+    //all insert in one batch
+    int[][] inserts = jdbcResource.getJdbcTemplate().getJdbcTemplate().batchUpdate(
+        "INSERT INTO \"PARTITION_PARAMS\" VALUES (?, ?, ?)",
+        partitionParams.stream().filter(p -> p.key == null).collect(Collectors.toList()), maxBatchSize,
+        (ps, argument) -> {
+          ps.setLong(1, argument.id);
+          ps.setString(2, prop);
+          ps.setString(3, propValue);
+        });
+    //all update in one batch
+    int[][] updates = jdbcResource.getJdbcTemplate().getJdbcTemplate().batchUpdate(
+        "UPDATE \"PARTITION_PARAMS\" SET \"PARAM_VALUE\" = ? WHERE \"PART_ID\" = ? AND \"PARAM_KEY\" = ?",
+        partitionParams.stream().filter(p -> p.key != null && !propValue.equals(p.value)).collect(Collectors.toList()), maxBatchSize,
+        (ps, argument) -> {
+          ps.setString(1, propValue);
+          ps.setLong(2, argument.id);
+          ps.setString(3, argument.key);
+        });
+
+    if (Arrays.stream(inserts).flatMapToInt(IntStream::of).sum() + Arrays.stream(updates).flatMapToInt(IntStream::of).sum() != partList.size()) {
+      throw new RuntimeException("PARTITION_PARAMS is corrupted, update failed");      
+    }    
+  }
+
+  private WriteSetInfo checkForWriteConflict(MultiDataSourceJdbcResource jdbcResource, long txnid) throws MetaException {
+    String writeConflictQuery = jdbcResource.getSqlGenerator().addLimitClause(1, 
+        "\"COMMITTED\".\"WS_TXNID\", \"COMMITTED\".\"WS_COMMIT_ID\", " +
+        "\"COMMITTED\".\"WS_DATABASE\", \"COMMITTED\".\"WS_TABLE\", \"COMMITTED\".\"WS_PARTITION\", " +
+        "\"CUR\".\"WS_COMMIT_ID\" \"CUR_WS_COMMIT_ID\", \"CUR\".\"WS_OPERATION_TYPE\" \"CUR_OP\", " +
+        "\"COMMITTED\".\"WS_OPERATION_TYPE\" \"COMMITTED_OP\" FROM \"WRITE_SET\" \"COMMITTED\" INNER JOIN \"WRITE_SET\" \"CUR\" " +
+        "ON \"COMMITTED\".\"WS_DATABASE\"=\"CUR\".\"WS_DATABASE\" AND \"COMMITTED\".\"WS_TABLE\"=\"CUR\".\"WS_TABLE\" " +
+        //For partitioned table we always track writes at partition level (never at table)
+        //and for non partitioned - always at table level, thus the same table should never
+        //have entries with partition key and w/o
+        "AND (\"COMMITTED\".\"WS_PARTITION\"=\"CUR\".\"WS_PARTITION\" OR (\"COMMITTED\".\"WS_PARTITION\" IS NULL AND \"CUR\".\"WS_PARTITION\" IS NULL)) " +
+        "WHERE \"CUR\".\"WS_TXNID\" <= \"COMMITTED\".\"WS_COMMIT_ID\" " + //txns overlap; could replace ws_txnid
+        // with txnid, though any decent DB should infer this
+        "AND \"CUR\".\"WS_TXNID\"= :txnId " + //make sure RHS of join only has rows we just inserted as
+        // part of this commitTxn() op
+        "AND \"COMMITTED\".\"WS_TXNID\" <> :txnId " + //and LHS only has committed txns
+        //U+U and U+D and D+D is a conflict and we don't currently track I in WRITE_SET at all
+        //it may seem like D+D should not be in conflict but consider 2 multi-stmt txns
+        //where each does "delete X + insert X, where X is a row with the same PK.  This is
+        //equivalent to an update of X but won't be in conflict unless D+D is in conflict.
+        //The same happens when Hive splits U=I+D early so it looks like 2 branches of a
+        //multi-insert stmt (an Insert and a Delete branch).  It also 'feels'
+        // un-serializable to allow concurrent deletes
+        "AND (\"COMMITTED\".\"WS_OPERATION_TYPE\" IN(:opUpdate, :opDelete) " +
+        "AND \"CUR\".\"WS_OPERATION_TYPE\" IN(:opUpdate, :opDelete))");
+    LOG.debug("Going to execute query: <{}>", writeConflictQuery);
+    return jdbcResource.getJdbcTemplate().query(writeConflictQuery,
+        new MapSqlParameterSource()
+            .addValue("txnId", txnid)
+            .addValue("opUpdate", OperationType.UPDATE.getSqlConst())
+            .addValue("opDelete", OperationType.DELETE.getSqlConst()),
+        (ResultSet rs) -> {
+          if(rs.next()) {
+            return new WriteSetInfo(rs.getLong("WS_TXNID"), rs.getLong("CUR_WS_COMMIT_ID"),
+                rs.getLong("WS_COMMIT_ID"), rs.getString("CUR_OP"), rs.getString("COMMITTED_OP"),
+                rs.getString("WS_DATABASE"), rs.getString("WS_TABLE"), rs.getString("WS_PARTITION"));
+          } else {
+            return null;
+          }
+        });
+  }
+
+  private void moveTxnComponentsToCompleted(MultiDataSourceJdbcResource jdbcResource, long txnid, char isUpdateDelete) {
+    // Move the record from txn_components into completed_txn_components so that the compactor
+    // knows where to look to compact.
+    String query = "INSERT INTO \"COMPLETED_TXN_COMPONENTS\" (\"CTC_TXNID\", \"CTC_DATABASE\", " +
+        "\"CTC_TABLE\", \"CTC_PARTITION\", \"CTC_WRITEID\", \"CTC_UPDATE_DELETE\") SELECT \"TC_TXNID\", " +
+        "\"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", \"TC_WRITEID\", :flag FROM \"TXN_COMPONENTS\" " +
+        "WHERE \"TC_TXNID\" = :txnid AND \"TC_OPERATION_TYPE\" <> :type";
+    //we only track compactor activity in TXN_COMPONENTS to handle the case where the
+    //compactor txn aborts - so don't bother copying it to COMPLETED_TXN_COMPONENTS
+    LOG.debug("Going to execute insert <{}>", query);
+    int affectedRows = jdbcResource.getJdbcTemplate().update(query,
+        new MapSqlParameterSource()
+            .addValue("flag", Character.toString(isUpdateDelete), Types.CHAR)
+            .addValue("txnid", txnid)
+            .addValue("type", OperationType.COMPACT.getSqlConst(), Types.CHAR));
+
+    if (affectedRows < 1) {
+      //this can be reasonable for an empty txn START/COMMIT or read-only txn
+      //also an IUD with DP that didn't match any rows.
+      LOG.info("Expected to move at least one record from txn_components to "
+          + "completed_txn_components when committing txn! {}", JavaUtils.txnIdToString(txnid));
+    }
+  }
+
+  private void updateKeyValueAssociatedWithTxn(MultiDataSourceJdbcResource jdbcResource, CommitTxnRequest rqst) {
+    if (!rqst.getKeyValue().getKey().startsWith(TxnStore.TXN_KEY_START)) {
+      String errorMsg = "Error updating key/value in the sql backend with"
+          + " txnId=" + rqst.getTxnid() + ","
+          + " tableId=" + rqst.getKeyValue().getTableId() + ","
+          + " key=" + rqst.getKeyValue().getKey() + ","
+          + " value=" + rqst.getKeyValue().getValue() + "."
+          + " key should start with " + TxnStore.TXN_KEY_START + ".";
+      LOG.warn(errorMsg);
+      throw new IllegalArgumentException(errorMsg);
+    }
+    String query = "UPDATE \"TABLE_PARAMS\" SET \"PARAM_VALUE\" = :value WHERE \"TBL_ID\" = :id AND \"PARAM_KEY\" = :key";
+    LOG.debug("Going to execute update <{}>", query);
+    int affectedRows = jdbcResource.getJdbcTemplate().update(query,
+        new MapSqlParameterSource()
+            .addValue("value", rqst.getKeyValue().getValue())
+            .addValue("id", rqst.getKeyValue().getTableId())
+            .addValue("key", rqst.getKeyValue().getKey()));
+    
+    if (affectedRows != 1) {
+      String errorMsg = "Error updating key/value in the sql backend with"
+          + " txnId=" + rqst.getTxnid() + ","
+          + " tableId=" + rqst.getKeyValue().getTableId() + ","
+          + " key=" + rqst.getKeyValue().getKey() + ","
+          + " value=" + rqst.getKeyValue().getValue() + "."
+          + " Only one row should have been affected but "
+          + affectedRows + " rows where affected.";
+      LOG.warn(errorMsg);
+      throw new IllegalStateException(errorMsg);
+    }
+  }
+
+  /**
+   * See overridden method in CompactionTxnHandler also.
+   */
+  private void updateWSCommitIdAndCleanUpMetadata(MultiDataSourceJdbcResource jdbcResource, long txnid, TxnType txnType,
+                                                    Long commitId, long tempId) throws MetaException {
+    List<String> queryBatch = new ArrayList<>(6);
+    // update write_set with real commitId
+    if (commitId != null) {
+      queryBatch.add("UPDATE \"WRITE_SET\" SET \"WS_COMMIT_ID\" = " + commitId +
+          " WHERE \"WS_COMMIT_ID\" = " + tempId + " AND \"WS_TXNID\" = " + txnid);
+    }
+    // clean up txn related metadata
+    if (txnType != TxnType.READ_ONLY) {
+      queryBatch.add("DELETE FROM \"TXN_COMPONENTS\" WHERE \"TC_TXNID\" = " + txnid);
+    }
+    queryBatch.add("DELETE FROM \"HIVE_LOCKS\" WHERE \"HL_TXNID\" = " + txnid);
+    // DO NOT remove the transaction from the TXN table, the cleaner will remove it when appropriate
+    queryBatch.add("UPDATE \"TXNS\" SET \"TXN_STATE\" = " + TxnStatus.COMMITTED + " WHERE \"TXN_ID\" = " + txnid);
+    if (txnType == TxnType.MATER_VIEW_REBUILD) {
+      queryBatch.add("DELETE FROM \"MATERIALIZATION_REBUILD_LOCKS\" WHERE \"MRL_TXN_ID\" = " + txnid);
+    }
+    if (txnType == TxnType.SOFT_DELETE || txnType == TxnType.COMPACTION) {
+      queryBatch.add("UPDATE \"COMPACTION_QUEUE\" SET \"CQ_NEXT_TXN_ID\" = " + commitId + ", \"CQ_COMMIT_TIME\" = " +
+          getEpochFn(jdbcResource.getDatabaseProduct()) + " WHERE \"CQ_TXN_ID\" = " + txnid);
+    }
+    
+    // execute all in one batch
+    jdbcResource.getJdbcTemplate().getJdbcTemplate().batchUpdate(queryBatch.toArray(new String[0]));
+  }
+
+  /**
+   * Create notification events on txn commit
+   * @param txnid committed txn
+   * @param txnType transaction type
+   * @throws MetaException ex
+   */
+  private void createCommitNotificationEvent(MultiDataSourceJdbcResource jdbcResource, long txnid, TxnType txnType)
+      throws MetaException {
+    if (transactionalListeners != null) {
+      MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+          EventMessage.EventType.COMMIT_TXN, new CommitTxnEvent(txnid, txnType), jdbcResource.getConnection(), jdbcResource.getSqlGenerator());
+
+      CompactionInfo compactionInfo = jdbcResource.execute(new GetCompactionInfoHandler(txnid, true));
+      if (compactionInfo != null) {
+        MetaStoreListenerNotifier
+            .notifyEventWithDirectSql(transactionalListeners, EventMessage.EventType.COMMIT_COMPACTION,
+                new CommitCompactionEvent(txnid, compactionInfo), jdbcResource.getConnection(), jdbcResource.getSqlGenerator());
+      } else {
+        LOG.warn("No compaction queue record found for Compaction type transaction commit. txnId:" + txnid);
+      }
+      
+    }
+  }
+
+  private static class DbEntityParam {
+    final long id;
+    final String key;
+    final String value;
+
+    public DbEntityParam(long id, String key, String value) {
+      this.id = id;
+      this.key = key;
+      this.value = value;
+    }
+  }
+  
+  private static class WriteSetInfo {
+    final long txnId;
+    final long currentCommitId;
+    final long committedCommitId;
+    final String currentOperationType;
+    final String committedOperationType;
+    final String database;
+    final String table;
+    final String partition;
+
+    public WriteSetInfo(long txnId, long currentCommitId, long committedCommitId, 
+                        String currentOperationType, String committedOperationType, 
+                        String database, String table, String partition) {
+      this.txnId = txnId;
+      this.currentCommitId = currentCommitId;
+      this.committedCommitId = committedCommitId;
+      this.currentOperationType = currentOperationType;
+      this.committedOperationType = committedOperationType;
+      this.database = database;
+      this.table = table;
+      this.partition = partition;
+    }
+  }
+
+}
\ No newline at end of file
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CompactFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CompactFunction.java
new file mode 100644
index 000000000000..5331dc9562ed
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/CompactFunction.java
@@ -0,0 +1,123 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hive.common.ValidCompactorWriteIdList;
+import org.apache.hadoop.hive.metastore.api.CompactionRequest;
+import org.apache.hadoop.hive.metastore.api.CompactionResponse;
+import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsRequest;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionState;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.commands.InsertCompactionRequestCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;
+
+import java.sql.Types;
+import java.util.Arrays;
+import java.util.Collections;
+
+import static org.apache.hadoop.hive.metastore.txn.TxnStore.INITIATED_RESPONSE;
+import static org.apache.hadoop.hive.metastore.txn.TxnStore.INITIATED_STATE;
+import static org.apache.hadoop.hive.metastore.txn.TxnStore.READY_FOR_CLEANING;
+import static org.apache.hadoop.hive.metastore.txn.TxnStore.REFUSED_RESPONSE;
+import static org.apache.hadoop.hive.metastore.txn.TxnStore.WORKING_STATE;
+import static org.apache.hadoop.hive.metastore.txn.TxnUtils.getFullTableName;
+
+public class CompactFunction implements TransactionalFunction<CompactionResponse> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(CompactFunction.class);
+  
+  private final CompactionRequest rqst;
+  private final long openTxnTimeOutMillis;
+  private final TxnStore.MutexAPI mutexAPI;
+
+  public CompactFunction(CompactionRequest rqst, long openTxnTimeOutMillis, TxnStore.MutexAPI mutexAPI) {
+    this.rqst = rqst;
+    this.openTxnTimeOutMillis = openTxnTimeOutMillis;
+    this.mutexAPI = mutexAPI;
+  }
+
+  @Override
+  public CompactionResponse execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    // Put a compaction request in the queue.
+    TxnStore.MutexAPI.LockHandle handle = null;
+    try {
+      /**
+       * MUTEX_KEY.CompactionScheduler lock ensures that there is only 1 entry in
+       * Initiated/Working state for any resource.  This ensures that we don't run concurrent
+       * compactions for any resource.
+       */
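+      // While this named mutex is held, the duplicate check below cannot race with another enqueue of the
+      // same db/table/partition, so at most one Initiated/Working entry can exist per resource.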
+      handle = mutexAPI.acquireLock(TxnStore.MUTEX_KEY.CompactionScheduler.name());
+
+      GetValidWriteIdsRequest request = new GetValidWriteIdsRequest(
+          Collections.singletonList(getFullTableName(rqst.getDbname(), rqst.getTablename())));
+      final ValidCompactorWriteIdList tblValidWriteIds = TxnUtils.createValidCompactWriteIdList(
+          new GetValidWriteIdsFunction(request, openTxnTimeOutMillis).execute(jdbcResource).getTblValidWriteIds().get(0));
+
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("ValidCompactWriteIdList: {}", tblValidWriteIds.writeToString());
+      }
+
+      NamedParameterJdbcTemplate npJdbcTemplate = jdbcResource.getJdbcTemplate();
+      Pair<Long, String> existing = npJdbcTemplate.query(
+          "SELECT \"CQ_ID\", \"CQ_STATE\" FROM \"COMPACTION_QUEUE\" WHERE (\"CQ_STATE\" IN(:states) OR" +
+              " (\"CQ_STATE\" = :readyForCleaningState AND \"CQ_HIGHEST_WRITE_ID\" = :highestWriteId)) AND" +
+              " \"CQ_DATABASE\"= :dbName AND \"CQ_TABLE\"= :tableName AND ((:partition is NULL AND \"CQ_PARTITION\" IS NULL) OR \"CQ_PARTITION\" = :partition)",
+          new MapSqlParameterSource()
+              .addValue("states", Arrays.asList(Character.toString(INITIATED_STATE), Character.toString(WORKING_STATE)))
+              .addValue("readyForCleaningState", READY_FOR_CLEANING, Types.VARCHAR)
+              .addValue("highestWriteId", tblValidWriteIds.getHighWatermark())
+              .addValue("dbName", rqst.getDbname())
+              .addValue("tableName", rqst.getTablename())
+              .addValue("partition", rqst.getPartitionname(), Types.VARCHAR),
+          rs -> {
+            if (rs.next()) {
+              return new ImmutablePair<>(rs.getLong("CQ_ID"), rs.getString("CQ_STATE"));
+            }
+            return null;
+          });
+      if (existing != null) {
+        String state = CompactionState.fromSqlConst(existing.getValue()).toString();
+        LOG.info("Ignoring request to compact {}/{}/{} since it is already {} with id={}", rqst.getDbname(),
+            rqst.getTablename(), rqst.getPartitionname(), state, existing.getKey());
+        CompactionResponse resp = new CompactionResponse(-1, REFUSED_RESPONSE, false);
+        resp.setErrormessage("Compaction is already scheduled with state='" + state + "' and id=" + existing.getKey());
+        return resp;
+      }
+
+      long id = new GenerateCompactionQueueIdFunction().execute(jdbcResource);
+      jdbcResource.execute(new InsertCompactionRequestCommand(id, CompactionState.INITIATED, rqst));
+      return new CompactionResponse(id, INITIATED_RESPONSE, true);
+    } finally {
+      if (handle != null) {
+        handle.releaseLocks();
+      }
+    }
+  }
+
+
+
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/EnqueueLockFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/EnqueueLockFunction.java
new file mode 100644
index 000000000000..28a69cc6e619
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/EnqueueLockFunction.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.metastore.api.LockRequest;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.api.TxnType;
+import org.apache.hadoop.hive.metastore.txn.TxnLockManager;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.commands.InsertHiveLocksCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.commands.InsertTxnComponentsCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.GetOpenTxnTypeAndLockHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.GetWriteIdsHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.dao.EmptyResultDataAccessException;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import java.util.Objects;
+
+public class EnqueueLockFunction implements TransactionalFunction<Long> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(EnqueueLockFunction.class);  
+
+  private static final String INCREMENT_NEXT_LOCK_ID_QUERY = "UPDATE \"NEXT_LOCK_ID\" SET \"NL_NEXT\" = %s";
+  private static final String UPDATE_HIVE_LOCKS_EXT_ID_QUERY = "UPDATE \"HIVE_LOCKS\" SET \"HL_LOCK_EXT_ID\" = %s " +
+      "WHERE \"HL_LOCK_EXT_ID\" = %s";
+
+  private final LockRequest lockRequest;
+
+  public EnqueueLockFunction(LockRequest lockRequest) {
+    this.lockRequest = lockRequest;
+  }
+
+  /**
+   * This enters locks into the queue in {@link org.apache.hadoop.hive.metastore.txn.entities.LockInfo#LOCK_WAITING} mode.
+   * Isolation Level Notes:
+   * 1. We use S4U (with read_committed) to generate the next (ext) lock id.  This serializes
+   * any 2 {@code enqueueLockWithRetry()} calls.
+   * 2. We use S4U on the relevant TXNS row to block any concurrent abort/commit/etc. operations
+   * @see TxnLockManager#checkLock(long, long, boolean, boolean)
+   */
+  @Override
+  public Long execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException, TxnAbortedException, NoSuchTxnException {
+    long txnid = lockRequest.getTxnid();
+    if (TxnUtils.isValidTxn(txnid)) {
+      //this also ensures that txn is still there in expected state
+      TxnType txnType = jdbcResource.execute(new GetOpenTxnTypeAndLockHandler(jdbcResource.getSqlGenerator(), txnid));
+      if (txnType == null) {
+        new EnsureValidTxnFunction(txnid).execute(jdbcResource);
+      }
+    }
+        /* Insert txn components and hive locks (with a temp extLockId) first, before getting the next lock ID in a select-for-update.
+           This should minimize the scope of the S4U and decrease the table lock duration. */
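+    /* Flow sketch: 1) insert TXN_COMPONENTS and HIVE_LOCKS rows keyed by a temporary ext lock id,
+       2) select NEXT_LOCK_ID for update to allocate the real id, 3) in one batch, bump NEXT_LOCK_ID and
+       rewrite the HIVE_LOCKS rows to the allocated id (see incrementLockIdAndUpdateHiveLocks below). */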
+    if (txnid > 0) {
+      jdbcResource.execute(new InsertTxnComponentsCommand(lockRequest, jdbcResource.execute(new GetWriteIdsHandler(lockRequest))));
+    }
+    long tempExtLockId = TxnUtils.generateTemporaryId();
+    jdbcResource.execute(new InsertHiveLocksCommand(lockRequest, tempExtLockId));
+
+    /* Get the next lock id.
+     * This has to be atomic with adding entries to HIVE_LOCKS (first added in W state) to prevent a race.
+     * Suppose ID gen is a separate txn and 2 concurrent lock() methods are running.  1st one generates nl_next=7,
+     * 2nd nl_next=8.  Then 8 goes first to insert into HIVE_LOCKS and acquires the locks.  Then 7 unblocks
+     * and adds its W locks, but it won't see locks from 8 since, to be 'fair', {@link #checkLock(java.sql.Connection, long)}
+     * doesn't block on locks acquired later than the one it's checking. */
+    long extLockId = getNextLockIdForUpdate(jdbcResource);
+    incrementLockIdAndUpdateHiveLocks(jdbcResource.getJdbcTemplate().getJdbcTemplate(), extLockId, tempExtLockId);
+
+    jdbcResource.getTransactionManager().getActiveTransaction().createSavepoint();
+
+    return extLockId;
+  }
+
+  private long getNextLockIdForUpdate(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    String s = jdbcResource.getSqlGenerator().addForUpdateClause("SELECT \"NL_NEXT\" FROM \"NEXT_LOCK_ID\"");
+    LOG.debug("Going to execute query <{}>", s);
+    
+    try {
+      return Objects.requireNonNull(
+          jdbcResource.getJdbcTemplate().queryForObject(s, new MapSqlParameterSource(), Long.class),
+          "This never should be null, it's just to suppress warnings");
+    } catch (EmptyResultDataAccessException e) {
+      LOG.error("Failure to get next lock ID for update! SELECT query returned empty ResultSet.");
+      throw new MetaException("Transaction tables not properly " +
+          "initialized, no record found in next_lock_id");      
+    }
+  }
+
+  private void incrementLockIdAndUpdateHiveLocks(JdbcTemplate jdbcTemplate, long extLockId, long tempId) {    
+    String incrCmd = String.format(INCREMENT_NEXT_LOCK_ID_QUERY, (extLockId + 1));
+    // update hive locks entries with the real EXT_LOCK_ID (replace temp ID)
+    String updateLocksCmd = String.format(UPDATE_HIVE_LOCKS_EXT_ID_QUERY, extLockId, tempId);
+    LOG.debug("Going to execute updates in batch: <{}>, and <{}>", incrCmd, updateLocksCmd);
+    jdbcTemplate.batchUpdate(incrCmd, updateLocksCmd);
+  }
+  
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/EnsureValidTxnFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/EnsureValidTxnFunction.java
new file mode 100644
index 000000000000..05c5ba58388c
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/EnsureValidTxnFunction.java
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+public class EnsureValidTxnFunction implements TransactionalFunction<Void> {
+
+  private final long txnId;
+
+  public EnsureValidTxnFunction(long txnId) {
+    this.txnId = txnId;
+  }
+
+  @Override
+  public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException, NoSuchTxnException, TxnAbortedException {
+    SqlParameterSource paramSource = new MapSqlParameterSource().addValue("txnId", txnId);
+    // We need to check whether this transaction is valid and open
+    TxnStatus status = jdbcResource.getJdbcTemplate().query("SELECT \"TXN_STATE\" FROM \"TXNS\" WHERE \"TXN_ID\" = :txnId",
+        paramSource, rs -> rs.next() ? TxnStatus.fromString(rs.getString("TXN_STATE")) : null);
+
+    if (status == null) {
+      // todo: add LIMIT 1 instead of count - should be more efficient
+      boolean alreadyCommitted = Boolean.TRUE.equals(jdbcResource.getJdbcTemplate().query("SELECT COUNT(*) FROM \"COMPLETED_TXN_COMPONENTS\" WHERE \"CTC_TXNID\" = :txnId",
+          paramSource, rs -> {
+            // todo: strictly speaking you can commit an empty txn, thus 2nd conjunct is wrong but
+            // only possible for multi-stmt txns
+            return rs.next() && rs.getInt(1) > 0;
+          }));
+
+      if (alreadyCommitted) {
+        // makes the message more informative - helps to find bugs in client code
+        throw new NoSuchTxnException("Transaction " + JavaUtils.txnIdToString(txnId)
+            + " is already committed.");
+      }
+      throw new NoSuchTxnException("No such transaction " + JavaUtils.txnIdToString(txnId));
+    } else {
+      if (status == TxnStatus.ABORTED) {
+        throw new TxnAbortedException("Transaction " + JavaUtils.txnIdToString(txnId)
+            + " already aborted");
+        // todo: add time of abort, which is not currently tracked. Requires schema change
+      }
+    }
+    return null;
+  }
+
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/FindPotentialCompactionsFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/FindPotentialCompactionsFunction.java
similarity index 87%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/FindPotentialCompactionsFunction.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/FindPotentialCompactionsFunction.java
index e23c16cb6feb..7d9bdbf7e2df 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/FindPotentialCompactionsFunction.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/FindPotentialCompactionsFunction.java
@@ -15,12 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.AbortedTxnHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.CompactionCandidateHandler;
 import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
 import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
 
@@ -48,7 +50,7 @@ public Set execute(MultiDataSourceJdbcResource jdbcResource) thr
     Set<CompactionInfo> candidates = new HashSet<>(jdbcResource.execute(
         new CompactionCandidateHandler(lastChecked, fetchSize)));
     int remaining = fetchSize - candidates.size();
-    if (collectAbortedTxns) {
+    if (collectAbortedTxns && remaining > 0) {
       candidates.addAll(jdbcResource.execute(new AbortedTxnHandler(abortedTimeThreshold, abortedThreshold, remaining)));
     }
     return candidates;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GenerateCompactionQueueIdFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GenerateCompactionQueueIdFunction.java
new file mode 100644
index 000000000000..6195d5196a77
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GenerateCompactionQueueIdFunction.java
@@ -0,0 +1,65 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+public class GenerateCompactionQueueIdFunction implements TransactionalFunction<Long> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(GenerateCompactionQueueIdFunction.class);
+
+  public GenerateCompactionQueueIdFunction() {}
+
+  @Override
+  public Long execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException  {
+    // Get the id for the next entry in the queue
+    String sql = jdbcResource.getSqlGenerator().addForUpdateClause("SELECT \"NCQ_NEXT\" FROM \"NEXT_COMPACTION_QUEUE_ID\"");
+    LOG.debug("going to execute SQL <{}>", sql);
+
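+    // Optimistic allocation (sketch): read the current NCQ_NEXT (select-for-update where the dialect
+    // supports it), then bump it with a conditional UPDATE; if another thread won the race (update count
+    // != 1), the method retries recursively until an id is claimed.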
+    Long allocatedId = jdbcResource.getJdbcTemplate().query(sql, rs -> {
+      if (!rs.next()) {
+        throw new IllegalStateException("Transaction tables not properly initiated, "
+            + "no record found in next_compaction_queue_id");
+      }
+      long id = rs.getLong(1);
+
+      int count = jdbcResource.getJdbcTemplate().update("UPDATE \"NEXT_COMPACTION_QUEUE_ID\" SET \"NCQ_NEXT\" = :newId WHERE \"NCQ_NEXT\" = :id",
+          new MapSqlParameterSource()
+              .addValue("id", id)
+              .addValue("newId", id + 1));
+
+      if (count != 1) {
+        //TODO: Eliminate this id generation by implementing: https://issues.apache.org/jira/browse/HIVE-27121
+        LOG.info("The returned compaction ID ({}) already taken, obtaining new", id);
+        return null;
+      }
+      return id;
+    });
+    if (allocatedId == null) {
+      return execute(jdbcResource);
+    } else {
+      return allocatedId;
+    }
+  }
+
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetMaterializationInvalidationInfoFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetMaterializationInvalidationInfoFunction.java
new file mode 100644
index 000000000000..f88fea62813b
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetMaterializationInvalidationInfoFunction.java
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.commons.lang3.ArrayUtils;
+import org.apache.hadoop.hive.common.MaterializationSnapshot;
+import org.apache.hadoop.hive.common.ValidReadTxnList;
+import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
+import org.apache.hadoop.hive.common.ValidWriteIdList;
+import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+import org.apache.hadoop.hive.metastore.api.Materialization;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
+import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+
+public class GetMaterializationInvalidationInfoFunction implements TransactionalFunction<Materialization> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(GetMaterializationInvalidationInfoFunction.class);
+
+  private final CreationMetadata creationMetadata;
+  private final String validTxnListStr;
+
+  public GetMaterializationInvalidationInfoFunction(CreationMetadata creationMetadata, String validTxnListStr) {
+    this.creationMetadata = creationMetadata;
+    this.validTxnListStr = validTxnListStr;
+  }
+
+  @Override
+  public Materialization execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    if (creationMetadata.getTablesUsed().isEmpty()) {
+      // Bail out
+      LOG.warn("Materialization creation metadata does not contain any table");
+      return null;
+    }
+
+    // We are composing a query that returns a single row if an update happened after
+    // the materialization was created. Otherwise, query returns 0 rows.
+
+    // Parse validReaderWriteIdList from creation metadata
+    MaterializationSnapshot mvSnapshot = MaterializationSnapshot.fromJson(creationMetadata.getValidTxnList());
+    if (mvSnapshot.getTableSnapshots() != null && !mvSnapshot.getTableSnapshots().isEmpty()) {
+      // Incremental rebuild of MVs on Iceberg sources is not supported.
+      return null;
+    }
+    final ValidTxnWriteIdList validReaderWriteIdList = new ValidTxnWriteIdList(mvSnapshot.getValidTxnList());
+
+    // Parse validTxnList
+    final ValidReadTxnList currentValidTxnList = new ValidReadTxnList(validTxnListStr);
+    // Get the valid write id list for the tables in current state
+    final List<TableValidWriteIds> currentTblValidWriteIdsList = new ArrayList<>();
+    for (String fullTableName : creationMetadata.getTablesUsed()) {
+      currentTblValidWriteIdsList.add(new GetValidWriteIdsForTableFunction(currentValidTxnList, fullTableName).execute(jdbcResource));
+    }
+    final ValidTxnWriteIdList currentValidReaderWriteIdList = TxnCommonUtils.createValidTxnWriteIdList(
+        currentValidTxnList.getHighWatermark(), currentTblValidWriteIdsList);
+
+    List params = new ArrayList<>();
+    StringBuilder queryUpdateDelete = new StringBuilder();
+    StringBuilder queryCompletedCompactions = new StringBuilder();
+    StringBuilder queryCompactionQueue = new StringBuilder();
+    // compose a query that selects transactions containing an update...
+    queryUpdateDelete.append("SELECT \"CTC_UPDATE_DELETE\" FROM \"COMPLETED_TXN_COMPONENTS\" WHERE \"CTC_UPDATE_DELETE\" ='Y' AND (");
+    queryCompletedCompactions.append("SELECT 1 FROM \"COMPLETED_COMPACTIONS\" WHERE (");
+    queryCompactionQueue.append("SELECT 1 FROM \"COMPACTION_QUEUE\" WHERE (");
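+    // Three probes are built in parallel: update/delete transactions, finished compactions, and still-queued compactions on the source tables.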
+    int i = 0;
+    for (String fullyQualifiedName : creationMetadata.getTablesUsed()) {
+      ValidWriteIdList tblValidWriteIdList =
+          validReaderWriteIdList.getTableValidWriteIdList(fullyQualifiedName);
+      if (tblValidWriteIdList == null) {
+        LOG.warn("ValidWriteIdList for table {} not present in creation metadata, this should not happen", fullyQualifiedName);
+        return null;
+      }
+
+      // First, we check whether the low watermark has moved for any of the tables.
+      // If it has, we bail out, since the materialization is not incrementally refreshable, e.g.,
+      // one of the commits that are not available may be an update/delete.
+      ValidWriteIdList currentTblValidWriteIdList =
+          currentValidReaderWriteIdList.getTableValidWriteIdList(fullyQualifiedName);
+      if (currentTblValidWriteIdList == null) {
+        LOG.warn("Current ValidWriteIdList for table {} not present in creation metadata, this should not happen", fullyQualifiedName);
+        return null;
+      }
+      if (!Objects.equals(currentTblValidWriteIdList.getMinOpenWriteId(), tblValidWriteIdList.getMinOpenWriteId())) {
+        LOG.debug("Minimum open write id do not match for table {}", fullyQualifiedName);
+        return null;
+      }
+
+      // ...for each of the tables that are part of the materialized view,
+      // where the transaction had to be committed after the materialization was created...
+      if (i != 0) {
+        queryUpdateDelete.append("OR");
+        queryCompletedCompactions.append("OR");
+        queryCompactionQueue.append("OR");
+      }
+      String[] names = TxnUtils.getDbTableName(fullyQualifiedName);
+      assert (names.length == 2);
+      queryUpdateDelete.append(" (\"CTC_DATABASE\"=? AND \"CTC_TABLE\"=?");
+      queryCompletedCompactions.append(" (\"CC_DATABASE\"=? AND \"CC_TABLE\"=?");
+      queryCompactionQueue.append(" (\"CQ_DATABASE\"=? AND \"CQ_TABLE\"=?");
+      params.add(names[0]);
+      params.add(names[1]);
+      queryUpdateDelete.append(" AND (\"CTC_WRITEID\" > " + tblValidWriteIdList.getHighWatermark());
+      queryCompletedCompactions.append(" AND (\"CC_HIGHEST_WRITE_ID\" > " + tblValidWriteIdList.getHighWatermark());
+      queryUpdateDelete.append(tblValidWriteIdList.getInvalidWriteIds().length == 0 ? ") " :
+          " OR \"CTC_WRITEID\" IN(" + StringUtils.join(",",
+              Arrays.asList(ArrayUtils.toObject(tblValidWriteIdList.getInvalidWriteIds()))) + ") ) ");
+      queryCompletedCompactions.append(tblValidWriteIdList.getInvalidWriteIds().length == 0 ? ") " :
+          " OR \"CC_HIGHEST_WRITE_ID\" IN(" + StringUtils.join(",",
+              Arrays.asList(ArrayUtils.toObject(tblValidWriteIdList.getInvalidWriteIds()))) + ") ) ");
+      queryUpdateDelete.append(") ");
+      queryCompletedCompactions.append(") ");
+      queryCompactionQueue.append(") ");
+      i++;
+    }
+    // ... and where the transaction has already been committed as per the snapshot taken
+    // when we are running the current query
+    queryUpdateDelete.append(") AND \"CTC_TXNID\" <= " + currentValidTxnList.getHighWatermark());
+    queryUpdateDelete.append(currentValidTxnList.getInvalidTransactions().length == 0 ? " " :
+        " AND \"CTC_TXNID\" NOT IN(" + StringUtils.join(",",
+            Arrays.asList(ArrayUtils.toObject(currentValidTxnList.getInvalidTransactions()))) + ") ");
+    queryCompletedCompactions.append(")");
+    queryCompactionQueue.append(") ");
+
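+    // Any row returned by the update/delete probe means a source table changed after the materialization snapshot was taken.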
+    boolean hasUpdateDelete = executeBoolean(jdbcResource, queryUpdateDelete.toString(), params,
+        "Unable to retrieve materialization invalidation information: completed transaction components.");
+
+    // Combine the compaction probes (completed + queued) and execute them as a single query
+    queryCompletedCompactions.append(" UNION ");
+    queryCompletedCompactions.append(queryCompactionQueue.toString());
+    List paramsTwice = new ArrayList<>(params);
+    paramsTwice.addAll(params);
+    boolean hasCompaction = executeBoolean(jdbcResource, queryCompletedCompactions.toString(), paramsTwice,
+        "Unable to retrieve materialization invalidation information: compactions");
+
+    return new Materialization(hasUpdateDelete, hasCompaction);
+  }
+
+  private boolean executeBoolean(MultiDataSourceJdbcResource jdbcResource, String queryText, List<String> params, String errorMessage) throws MetaException {
+    try (PreparedStatement pst = jdbcResource.getSqlGenerator().prepareStmtWithParameters(jdbcResource.getConnection(), queryText, params)) {
+      LOG.debug("Going to execute query <{}>", queryText);
+      pst.setMaxRows(1);
+      try (ResultSet rs = pst.executeQuery()) {
+        return rs.next();
+      }
+    } catch (SQLException ex) {
+      LOG.warn(errorMessage, ex);
+      throw new MetaException(errorMessage + " " + StringUtils.stringifyException(ex));
+    }
+  }
+
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetValidWriteIdsForTableFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetValidWriteIdsForTableFunction.java
new file mode 100644
index 000000000000..a7f2b64606ba
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetValidWriteIdsForTableFunction.java
@@ -0,0 +1,155 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.ResultSetExtractor;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import java.nio.ByteBuffer;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicLong;
+
+public class GetValidWriteIdsForTableFunction implements TransactionalFunction<TableValidWriteIds> {
+
+  private final ValidTxnList validTxnList;
+  private final String fullTableName;
+
+  public GetValidWriteIdsForTableFunction(ValidTxnList validTxnList, String fullTableName) {
+    this.validTxnList = validTxnList;
+    this.fullTableName = fullTableName;
+  }
+
+  // Method to get the valid write ids list for the given table.
+  // Input fullTableName is expected to be of format <db_name>.<table_name>
+  @Override
+  public TableValidWriteIds execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    String[] names = TxnUtils.getDbTableName(fullTableName);
+    assert (names.length == 2);
+
+    // Find the writeId high watermark based upon txnId high watermark. If found, then, need to
+    // traverse through all write Ids less than writeId HWM to make exceptions list.
+    // The writeHWM = min(NEXT_WRITE_ID.nwi_next-1, max(TXN_TO_WRITE_ID.t2w_writeid under txnHwm))
+    long writeIdHwm = Objects.requireNonNull(jdbcResource.getJdbcTemplate().query(
+        "SELECT MAX(\"T2W_WRITEID\") FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_TXNID\" <= :txnHwm "
+            + " AND \"T2W_DATABASE\" = :db AND \"T2W_TABLE\" = :table",
+        new MapSqlParameterSource()
+            .addValue("txnHwm", validTxnList.getHighWatermark())
+            .addValue("db", names[0])
+            .addValue("table", names[1]), new HwmExtractor()));
+
+    // If no writeIds allocated by txns under txnHwm, then find writeHwm from NEXT_WRITE_ID.
+    if (writeIdHwm <= 0) {
+      // Need to subtract 1 as nwi_next would be the next write id to be allocated but we need highest
+      // allocated write id.
+      writeIdHwm = Objects.requireNonNull(jdbcResource.getJdbcTemplate().query(
+          "SELECT \"NWI_NEXT\" -1 FROM \"NEXT_WRITE_ID\" WHERE \"NWI_DATABASE\" = :db AND \"NWI_TABLE\" = :table",
+          new MapSqlParameterSource()
+              .addValue("db", names[0])
+              .addValue("table", names[1]), new HwmExtractor()));
+    }
+
+    final List invalidWriteIdList = new ArrayList<>();
+    final BitSet abortedBits = new BitSet();
+    final AtomicLong minOpenWriteId = new AtomicLong(Long.MAX_VALUE);
+    final AtomicBoolean foundValidUncompactedWrite = new AtomicBoolean(false);
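+    // Atomics are used only so the row callback below can mutate these accumulators from inside the lambda.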
+
+    // As writeIdHwm is known, query all writeIds under the writeId HWM.
+    // If any writeId under HWM is allocated by txn > txnId HWM or belongs to open/aborted txns,
+    // then will be added to invalid list. The results should be sorted in ascending order based
+    // on write id. The sorting is needed as exceptions list in ValidWriteIdList would be looked-up
+    // using binary search.
+    jdbcResource.getJdbcTemplate().query(
+        "SELECT \"T2W_TXNID\", \"T2W_WRITEID\" FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_WRITEID\" <= :writeIdHwm" +
+            " AND \"T2W_DATABASE\" = :db AND \"T2W_TABLE\" = :table ORDER BY \"T2W_WRITEID\" ASC",
+        new MapSqlParameterSource()
+            .addValue("writeIdHwm", writeIdHwm)
+            .addValue("db", names[0])
+            .addValue("table", names[1]), rs -> {
+          while (rs.next()) {
+            long txnId = rs.getLong(1);
+            long writeId = rs.getLong(2);
+            if (validTxnList.isTxnValid(txnId)) {
+              // Skip if the transaction under evaluation is already committed.
+              foundValidUncompactedWrite.set(true);
+              continue;
+            }
+            // The current txn is either in open or aborted state.
+            // Mark the write ids state as per the txn state.
+            invalidWriteIdList.add(writeId);
+            if (validTxnList.isTxnAborted(txnId)) {
+              abortedBits.set(invalidWriteIdList.size() - 1);
+            } else {
+              minOpenWriteId.set(Math.min(minOpenWriteId.get(), writeId));
+            }
+          }
+          return null;
+        });
+
+    // If we have compacted writes and some invalid writes on the table,
+    // return the lowest invalid write as a writeIdHwm and set it as invalid.
+    if (!foundValidUncompactedWrite.get()) {
+      long writeId = invalidWriteIdList.isEmpty() ? -1 : invalidWriteIdList.get(0);
+      invalidWriteIdList.clear();
+      abortedBits.clear();
+
+      if (writeId != -1) {
+        invalidWriteIdList.add(writeId);
+        writeIdHwm = writeId;
+        if (writeId != minOpenWriteId.get()) {
+          abortedBits.set(0);
+        }
+      }
+    }
+    ByteBuffer byteBuffer = ByteBuffer.wrap(abortedBits.toByteArray());
+    TableValidWriteIds owi = new TableValidWriteIds(fullTableName, writeIdHwm, invalidWriteIdList, byteBuffer);
+    if (minOpenWriteId.get() < Long.MAX_VALUE) {
+      owi.setMinOpenWriteId(minOpenWriteId.get());
+    }
+    return owi;
+  }
+
+  private static class HwmExtractor implements ResultSetExtractor<Long> {
+
+    @Override
+    public Long extractData(ResultSet rs) throws SQLException, DataAccessException {
+      if (rs.next()) {
+        return rs.getLong(1);
+      } else {
+        // Need to initialize to 0 to make sure that if nobody has modified this table,
+        // the current txn doesn't read any data.
+        // If a table is converted from non-acid to acid, writeId 0 is assigned by default to the
+        // pre-existing data, so writeIdHwm=0 ensures that data remains readable by any txn.
+        return 0L;
+      }
+    }
+  }
+
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetValidWriteIdsFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetValidWriteIdsFunction.java
new file mode 100644
index 000000000000..2992963e95ac
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/GetValidWriteIdsFunction.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.common.ValidReadTxnList;
+import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
+import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsRequest;
+import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
+import org.apache.hadoop.hive.metastore.api.TxnType;
+import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.GetOpenTxnsListHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import java.sql.ResultSet;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+public class GetValidWriteIdsFunction implements TransactionalFunction<GetValidWriteIdsResponse> {
+
+  private final GetValidWriteIdsRequest rqst;
+  private final long openTxnTimeOutMillis;
+
+  public GetValidWriteIdsFunction(GetValidWriteIdsRequest rqst, long openTxnTimeOutMillis) {
+    this.rqst = rqst;
+    this.openTxnTimeOutMillis = openTxnTimeOutMillis;
+  }
+
+  @Override
+  public GetValidWriteIdsResponse execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    ValidTxnList validTxnList;
+
+    // We should prepare the valid write ids list based on validTxnList of current txn.
+    // If no txn exists in the caller, then they would pass null for validTxnList and so it is
+    // required to get the current state of txns to make validTxnList
+    if (rqst.isSetValidTxnList()) {
+      assert !rqst.isSetWriteId();
+      validTxnList = new ValidReadTxnList(rqst.getValidTxnList());
+    } else if (rqst.isSetWriteId()) {
+      validTxnList = TxnCommonUtils.createValidReadTxnList(getOpenTxns(jdbcResource), 
+          getTxnId(jdbcResource, rqst.getFullTableNames().get(0), rqst.getWriteId())); 
+    } else {
+      // Passing 0 for currentTxn means this validTxnList is not w.r.t. any particular txn
+      validTxnList = TxnCommonUtils.createValidReadTxnList(getOpenTxns(jdbcResource), 0);
+    }
+
+    // Get the valid write id list for all the tables read by the current txn
+    List tblValidWriteIdsList = new ArrayList<>();
+    for (String fullTableName : rqst.getFullTableNames()) {
+      tblValidWriteIdsList.add(new GetValidWriteIdsForTableFunction(validTxnList, fullTableName).execute(jdbcResource));
+    }
+    return new GetValidWriteIdsResponse(tblValidWriteIdsList);
+  }
+
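+  // Resolves the txn that allocated the given writeId for the table via TXN_TO_WRITE_ID; throws if no such mapping exists.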
+  private long getTxnId(MultiDataSourceJdbcResource jdbcResource, String fullTableName, Long writeId) throws MetaException {
+    String[] names = TxnUtils.getDbTableName(fullTableName);
+    assert (names.length == 2);
+    Long txnId = jdbcResource.getJdbcTemplate().query(
+        "SELECT \"T2W_TXNID\" FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_DATABASE\" = :db AND "
+            + "\"T2W_TABLE\" = :table AND \"T2W_WRITEID\" = :writeId",
+        new MapSqlParameterSource()
+            .addValue("db", names[0])
+            .addValue("table", names[1])
+            .addValue("writeId", writeId),
+        (ResultSet rs) -> {
+          if(rs.next()) {
+            long id = rs.getLong(1);
+            return rs.wasNull() ? null : id;
+          }
+          return null;
+        });
+    if (txnId == null) {
+      throw new MetaException("invalid write id " + writeId + " for table " + fullTableName);
+    }
+    return txnId;
+  }
+  
+  private GetOpenTxnsResponse getOpenTxns(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    return jdbcResource.execute(new GetOpenTxnsListHandler(false, openTxnTimeOutMillis))
+        .toOpenTxnsResponse(Collections.singletonList(TxnType.READ_ONLY));
+  }
+
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/HeartbeatLockFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/HeartbeatLockFunction.java
new file mode 100644
index 000000000000..779de4256bfc
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/HeartbeatLockFunction.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import static org.apache.hadoop.hive.metastore.txn.TxnUtils.getEpochFn;
+
+/**
+ * Heartbeats on the lock table.  This commits, so do not enter it with any state.
+ * Should not be called on a lock that belongs to a transaction.
+ */
+public class HeartbeatLockFunction implements TransactionalFunction<Void> {
+
+  private final long extLockId;
+
+  public HeartbeatLockFunction(long extLockId) {
+    this.extLockId = extLockId;
+  }
+  
+  @Override
+  public Void execute(MultiDataSourceJdbcResource jdbcResource) 
+      throws MetaException, NoSuchTxnException, TxnAbortedException, NoSuchLockException {
+    // If the lock id is 0, then there are no locks in this heartbeat
+    if (extLockId == 0) {
+      return null;
+    }
+    
+    int rc = jdbcResource.getJdbcTemplate().update("UPDATE \"HIVE_LOCKS\" SET \"HL_LAST_HEARTBEAT\" = " +
+        getEpochFn(jdbcResource.getDatabaseProduct()) + " WHERE \"HL_LOCK_EXT_ID\" = :extLockId",
+        new MapSqlParameterSource().addValue("extLockId", extLockId));
+    if (rc < 1) {
+      throw new NoSuchLockException("No such lock: " + JavaUtils.lockIdToString(extLockId));
+    }
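+    // Mark a savepoint so that later failures in the surrounding transaction can roll back to this point without undoing the heartbeat.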
+    jdbcResource.getTransactionManager().getActiveTransaction().createSavepoint();    
+    return null;
+  }
+  
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/HeartbeatTxnFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/HeartbeatTxnFunction.java
new file mode 100644
index 000000000000..979900a7e273
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/HeartbeatTxnFunction.java
@@ -0,0 +1,73 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import java.sql.Types;
+
+import static org.apache.hadoop.hive.metastore.txn.TxnUtils.getEpochFn;
+
+/**
+ * Heartbeats on the txn table. This commits, so do not enter it with any state. 
+ */
+public class HeartbeatTxnFunction implements TransactionalFunction<Void> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(HeartbeatTxnFunction.class);
+
+  private final long txnId;
+
+  public HeartbeatTxnFunction(long txnId) {
+    this.txnId = txnId;
+  }
+
+  @Override
+  public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException, NoSuchTxnException, TxnAbortedException, NoSuchLockException {
+    // If the txnid is 0, then there are no transactions in this heartbeat
+    if (txnId == 0) {
+      return null;
+    }
+    
+    int rc = jdbcResource.getJdbcTemplate().update(
+        "UPDATE \"TXNS\" SET \"TXN_LAST_HEARTBEAT\" = " + getEpochFn(jdbcResource.getDatabaseProduct()) +
+        " WHERE \"TXN_ID\" = :txnId AND \"TXN_STATE\" = :state", 
+        new MapSqlParameterSource()
+            .addValue("txnId", txnId)
+            .addValue("state", TxnStatus.OPEN.getSqlConst(), Types.CHAR));
+    
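+    // Zero updated rows means the txn is not in OPEN state (or does not exist); EnsureValidTxnFunction is expected to surface the precise error.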
+    if (rc < 1) {
+      new EnsureValidTxnFunction(txnId).execute(jdbcResource); // This should now throw some useful exception.
+      LOG.error("Can neither heartbeat txn (txnId={}) nor confirm it as invalid.", txnId);
+      throw new NoSuchTxnException("No such txn: " + txnId);
+    }
+    
+    LOG.debug("Successfully heartbeated for txnId={}", txnId);
+    jdbcResource.getTransactionManager().getActiveTransaction().createSavepoint();
+    return null;
+  }
+
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/HeartbeatTxnRangeFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/HeartbeatTxnRangeFunction.java
new file mode 100644
index 000000000000..ff3c801b643c
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/HeartbeatTxnRangeFunction.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest;
+import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
+import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionContext;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import static org.apache.hadoop.hive.metastore.txn.TxnUtils.getEpochFn;
+
+public class HeartbeatTxnRangeFunction implements TransactionalFunction<HeartbeatTxnRangeResponse> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(HeartbeatTxnRangeFunction.class);
+
+  private final HeartbeatTxnRangeRequest rqst;
+
+  public HeartbeatTxnRangeFunction(HeartbeatTxnRangeRequest rqst) {
+    this.rqst = rqst;
+  }
+
+  @Override
+  public HeartbeatTxnRangeResponse execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    HeartbeatTxnRangeResponse rsp = new HeartbeatTxnRangeResponse();
+    Set nosuch = new HashSet<>();
+    Set aborted = new HashSet<>();
+    rsp.setNosuch(nosuch);
+    rsp.setAborted(aborted);
+    /**
+     * READ_COMMITTED is sufficient since {@link #heartbeatTxn(java.sql.Connection, long)}
+     * only has 1 update statement in it and
+     * we only update existing txns, i.e. nothing can add additional txns that this operation
+     * would care about (which would have required SERIALIZABLE)
+     */
+    /*do fast path first (in 1 statement) if doesn't work, rollback and do the long version*/
+    List queries = new ArrayList<>();
+    int numTxnsToHeartbeat = (int) (rqst.getMax() - rqst.getMin() + 1);
+    List txnIds = new ArrayList<>(numTxnsToHeartbeat);
+    for (long txn = rqst.getMin(); txn <= rqst.getMax(); txn++) {
+      txnIds.add(txn);
+    }
+    TransactionContext context = jdbcResource.getTransactionManager().getActiveTransaction();
+    Object savePoint = context.createSavepoint();
+    TxnUtils.buildQueryWithINClause(jdbcResource.getConf(), queries,
+        new StringBuilder("UPDATE \"TXNS\" SET \"TXN_LAST_HEARTBEAT\" = " + getEpochFn(jdbcResource.getDatabaseProduct()) +
+            " WHERE \"TXN_STATE\" = " + TxnStatus.OPEN + " AND "),
+        new StringBuilder(""), txnIds, "\"TXN_ID\"", true, false);
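+    // buildQueryWithINClause may split the txn id list into several statements to respect the configured IN-clause size limits.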
+    int updateCnt = 0;
+    for (String query : queries) {
+      LOG.debug("Going to execute update <{}>", query);
+      updateCnt += jdbcResource.getJdbcTemplate().update(query, new MapSqlParameterSource());
+    }
+    if (updateCnt == numTxnsToHeartbeat) {
+      //fast path worked, i.e. all txns we were asked to heartbeat were Open as expected
+      context.rollbackToSavepoint(savePoint);
+      return rsp;
+    }
+    //if here, do the slow path so that we can return info about txns which were not in the expected state
+    context.rollbackToSavepoint(savePoint);
+    for (long txn = rqst.getMin(); txn <= rqst.getMax(); txn++) {
+      try {
+        new HeartbeatTxnFunction(txn).execute(jdbcResource);
+      } catch (NoSuchTxnException e) {
+        nosuch.add(txn);
+      } catch (TxnAbortedException e) {
+        aborted.add(txn);
+      } catch (NoSuchLockException e) {
+        throw new RuntimeException(e);
+      }
+    }
+    return rsp;
+  }
+
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/LockMaterializationRebuildFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/LockMaterializationRebuildFunction.java
new file mode 100644
index 000000000000..ce7257eae648
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/LockMaterializationRebuildFunction.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.common.TableName;
+import org.apache.hadoop.hive.metastore.api.LockResponse;
+import org.apache.hadoop.hive.metastore.api.LockState;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import java.sql.ResultSet;
+import java.time.Instant;
+
+public class LockMaterializationRebuildFunction implements TransactionalFunction<LockResponse> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(LockMaterializationRebuildFunction.class);
+
+  private final String dbName;
+  private final String tableName;
+  private final long txnId;
+  private final TxnStore.MutexAPI mutexAPI;
+
+  public LockMaterializationRebuildFunction(String dbName, String tableName, long txnId, TxnStore.MutexAPI mutexAPI) {
+    this.dbName = dbName;
+    this.tableName = tableName;
+    this.txnId = txnId;
+    this.mutexAPI = mutexAPI;
+  }
+
+  @Override
+  public LockResponse execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Acquiring lock for materialization rebuild with {} for {}",
+          JavaUtils.txnIdToString(txnId), TableName.getDbTable(dbName, tableName));
+    }
+
+    /**
+     * MUTEX_KEY.MaterializationRebuild lock ensures that there is only 1 entry in
+     * Initiated/Working state for any resource. This ensures we do not run concurrent
+     * rebuild operations on any materialization.
+     */
+    try (TxnStore.MutexAPI.LockHandle ignored = mutexAPI.acquireLock(TxnStore.MUTEX_KEY.MaterializationRebuild.name())){
+      MapSqlParameterSource params = new MapSqlParameterSource()
+          .addValue("dbName", dbName)
+          .addValue("tableName", tableName);
+
+      String selectQ = "SELECT \"MRL_TXN_ID\" FROM \"MATERIALIZATION_REBUILD_LOCKS\" WHERE" +
+          " \"MRL_DB_NAME\" = :dbName AND \"MRL_TBL_NAME\" = :tableName";
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Going to execute query {}", selectQ);
+      }
+      boolean found = Boolean.TRUE.equals(jdbcResource.getJdbcTemplate().query(selectQ, params, ResultSet::next));
+      
+      if(found) {
+        LOG.info("Ignoring request to rebuild {}/{} since it is already being rebuilt", dbName, tableName);
+        return new LockResponse(txnId, LockState.NOT_ACQUIRED);
+      }
+      
+      String insertQ = "INSERT INTO \"MATERIALIZATION_REBUILD_LOCKS\" " +
+          "(\"MRL_TXN_ID\", \"MRL_DB_NAME\", \"MRL_TBL_NAME\", \"MRL_LAST_HEARTBEAT\") " +
+          "VALUES (:txnId, :dbName, :tableName, " + Instant.now().toEpochMilli() + ")";
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Going to execute update {}", insertQ);
+      }
+      jdbcResource.getJdbcTemplate().update(insertQ, params.addValue("txnId", txnId));
+      return new LockResponse(txnId, LockState.ACQUIRED);
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+}
\ No newline at end of file
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/MarkCleanedFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/MarkCleanedFunction.java
similarity index 90%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/MarkCleanedFunction.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/MarkCleanedFunction.java
index 52b8b9a32f85..9f985d7c459e 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/MarkCleanedFunction.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/MarkCleanedFunction.java
@@ -15,15 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
 
 import org.apache.commons.collections.CollectionUtils;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
-import org.apache.hadoop.hive.metastore.txn.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
-import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.InClauseBatchCommand;
 import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
 import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
 import org.slf4j.Logger;
@@ -42,11 +41,9 @@ public class MarkCleanedFunction implements TransactionalFunction {
   private static final Logger LOG = LoggerFactory.getLogger(MarkCleanedFunction.class);
 
   private final CompactionInfo info;
-  private final Configuration conf;
 
-  public MarkCleanedFunction(CompactionInfo info, Configuration conf) {
+  public MarkCleanedFunction(CompactionInfo info) {
     this.info = info;
-    this.conf = conf;
   }
 
   @Override
@@ -111,11 +108,11 @@ public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaExcepti
     }
 
     // Do cleanup of metadata in TXN_COMPONENTS table.
-    removeTxnComponents(info, jdbcTemplate);
+    removeTxnComponents(info, jdbcResource);
     return null;
   }
 
-  private void removeTxnComponents(CompactionInfo info, NamedParameterJdbcTemplate jdbcTemplate) {
+  private void removeTxnComponents(CompactionInfo info, MultiDataSourceJdbcResource jdbcResource) throws MetaException {
     /*
      * compaction may remove data from aborted txns above tc_writeid bit it only guarantees to
      * remove it up to (inclusive) tc_writeid, so it's critical to not remove metadata about
@@ -131,7 +128,7 @@ private void removeTxnComponents(CompactionInfo info, NamedParameterJdbcTemplate
 
     int totalCount = 0;
     if (!info.hasUncompactedAborts && info.highestWriteId != 0) {
-      totalCount = jdbcTemplate.update(
+      totalCount = jdbcResource.getJdbcTemplate().update(
           "DELETE FROM \"TXN_COMPONENTS\" WHERE \"TC_TXNID\" IN ( "
               + "SELECT \"TXN_ID\" FROM \"TXNS\" WHERE \"TXN_STATE\" = :state) "
               + "AND \"TC_DATABASE\" = :db AND \"TC_TABLE\" = :table "
@@ -139,13 +136,14 @@ private void removeTxnComponents(CompactionInfo info, NamedParameterJdbcTemplate
               + "AND \"TC_WRITEID\" <= :id",
           params.addValue("id", info.highestWriteId));
     } else if (CollectionUtils.isNotEmpty(info.writeIds)) {
-      totalCount = TxnUtils.executeStatementWithInClause(conf, jdbcTemplate,
+      params.addValue("ids", new ArrayList<>(info.writeIds));
+      totalCount = jdbcResource.execute(new InClauseBatchCommand<>(
           "DELETE FROM \"TXN_COMPONENTS\" WHERE \"TC_TXNID\" IN ( "
               + "SELECT \"TXN_ID\" FROM \"TXNS\" WHERE \"TXN_STATE\" = :state) "
               + "AND \"TC_DATABASE\" = :db AND \"TC_TABLE\" = :table "
               + "AND (:partition is NULL OR \"TC_PARTITION\" = :partition) "
-              + "AND \"TC_WRITEID\" IN (:ids)",
-          params, "ids", new ArrayList<>(info.writeIds), Long::compareTo);
+              + "AND \"TC_WRITEID\" IN (:ids)", 
+          params, "ids", Long::compareTo));
     }
     LOG.debug("Removed {} records from txn_components", totalCount);
   }
@@ -168,11 +166,11 @@ private void removeCompactionAndAbortRetryEntries(CompactionInfo info, NamedPara
           .addValue("table", info.tableName)
           .addValue("type", Character.toString(TxnStore.ABORT_TXN_CLEANUP_TYPE), Types.CHAR)
           .addValue("partition", info.partName, Types.VARCHAR);
-    }    
+    }
 
     LOG.debug("Going to execute update <{}>", query);
-    int rc = jdbcTemplate.update(query,params);
+    int rc = jdbcTemplate.update(query, params);
     LOG.debug("Removed {} records in COMPACTION_QUEUE", rc);
-  }  
-  
+  }
+
 }
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/MinOpenTxnIdWaterMarkFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/MinOpenTxnIdWaterMarkFunction.java
new file mode 100644
index 000000000000..0407729af01b
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/MinOpenTxnIdWaterMarkFunction.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.OpenTxnTimeoutLowBoundaryTxnIdHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.Types;
+import java.util.Objects;
+
+public class MinOpenTxnIdWaterMarkFunction implements TransactionalFunction<Long> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(MinOpenTxnIdWaterMarkFunction.class);
+  
+  private final long openTxnTimeOutMillis;
+
+  public MinOpenTxnIdWaterMarkFunction(long openTxnTimeOutMillis) {
+    this.openTxnTimeOutMillis = openTxnTimeOutMillis;
+  }
+
+  @Override
+  public Long execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    /**
+     * We try to find the highest transactionId below which everything was committed or aborted.
+     * For that we look for the lowest open transaction in the TXNS table and the TxnMinTimeout boundary,
+     * because it is guaranteed there won't be open transactions below that.
+     */
+    long minOpenTxn = Objects.requireNonNull(jdbcResource.getJdbcTemplate().query(
+        "SELECT MIN(\"TXN_ID\") FROM \"TXNS\" WHERE \"TXN_STATE\"= :status",
+        new MapSqlParameterSource().addValue("status", TxnStatus.OPEN.getSqlConst(), Types.CHAR),
+        (ResultSet rs) -> {
+          if (!rs.next()) {
+            throw new IllegalStateException("Scalar query returned no rows?!?!!");
+          }
+          long id = rs.getLong(1);
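+          // A NULL MIN() means there are no open txns; fall back to MAX_VALUE so the timeout boundary below decides the watermark.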
+          if (rs.wasNull()) {
+            id = Long.MAX_VALUE;
+          }
+          return id;
+        }));
+    long lowWaterMark = jdbcResource.execute(new OpenTxnTimeoutLowBoundaryTxnIdHandler(openTxnTimeOutMillis));
+    LOG.debug("MinOpenTxnIdWaterMark calculated with minOpenTxn {}, lowWaterMark {}", minOpenTxn, lowWaterMark);
+    return Long.min(minOpenTxn, lowWaterMark + 1);
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/NextCompactionFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/NextCompactionFunction.java
similarity index 97%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/NextCompactionFunction.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/NextCompactionFunction.java
index cf07b1198d1d..f39ba8b3d147 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/NextCompactionFunction.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/NextCompactionFunction.java
@@ -15,12 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.hive.metastore.api.FindNextCompactRequest;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
 import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/OnRenameFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/OnRenameFunction.java
new file mode 100644
index 000000000000..1167ee4f42a2
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/OnRenameFunction.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import java.sql.Types;
+
+public class OnRenameFunction implements TransactionalFunction<Void> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(OnRenameFunction.class);
+  
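+  // Each statement renames entries in one metadata table: COALESCE leaves a column unchanged when its new value is null,
+  // and the "= :old OR :old IS NULL" predicates treat missing old values as wildcards.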
+  //language=SQL
+  private static final String[] UPDATE_COMMANDS = new String[]{
+      "UPDATE \"TXN_COMPONENTS\" SET " +
+          "\"TC_PARTITION\" = COALESCE(:newPartName, \"TC_PARTITION\"), " +
+          "\"TC_TABLE\" = COALESCE(:newTableName, \"TC_TABLE\"), " +
+          "\"TC_DATABASE\" = COALESCE(:newDbName, \"TC_DATABASE\") WHERE " +
+          "(\"TC_PARTITION\" = :oldPartName OR :oldPartName IS NULL) AND " +
+          "(\"TC_TABLE\" = :oldTableName OR :oldTableName IS NULL) AND " +
+          "(\"TC_DATABASE\" = :oldDbName OR :oldDbName IS NULL)",
+      "UPDATE \"COMPLETED_TXN_COMPONENTS\" SET " +
+          "\"CTC_PARTITION\" = COALESCE(:newPartName, \"CTC_PARTITION\"), " +
+          "\"CTC_TABLE\" = COALESCE(:newTableName, \"CTC_TABLE\"), " +
+          "\"CTC_DATABASE\" = COALESCE(:newDbName, \"CTC_DATABASE\") WHERE " +
+          "(\"CTC_PARTITION\" = :oldPartName OR :oldPartName IS NULL) AND " +
+          "(\"CTC_TABLE\" = :oldTableName OR :oldTableName IS NULL) AND " +
+          "(\"CTC_DATABASE\" = :oldDbName OR :oldDbName IS NULL)",
+      "UPDATE \"HIVE_LOCKS\" SET " +
+          "\"HL_PARTITION\" = COALESCE(:newPartName, \"HL_PARTITION\"), " +
+          "\"HL_TABLE\" = COALESCE(:newTableName, \"HL_TABLE\"), " +
+          "\"HL_DB\" = COALESCE(:newDbName, \"HL_DB\") WHERE " +
+          "(\"HL_PARTITION\" = :oldPartName OR :oldPartName IS NULL) AND " +
+          "(\"HL_TABLE\" = :oldTableName OR :oldTableName IS NULL) AND " +
+          "(\"HL_DB\" = :oldDbName OR :oldDbName IS NULL)",
+      "UPDATE \"COMPACTION_QUEUE\" SET " +
+          "\"CQ_PARTITION\" = COALESCE(:newPartName, \"CQ_PARTITION\"), " +
+          "\"CQ_TABLE\" = COALESCE(:newTableName, \"CQ_TABLE\"), " +
+          "\"CQ_DATABASE\" = COALESCE(:newDbName, \"CQ_DATABASE\") WHERE " +
+          "(\"CQ_PARTITION\" = :oldPartName OR :oldPartName IS NULL) AND " +
+          "(\"CQ_TABLE\" = :oldTableName OR :oldTableName IS NULL) AND " +
+          "(\"CQ_DATABASE\" = :oldDbName OR :oldDbName IS NULL)",
+      "UPDATE \"COMPLETED_COMPACTIONS\" SET " +
+          "\"CC_PARTITION\" = COALESCE(:newPartName, \"CC_PARTITION\"), " +
+          "\"CC_TABLE\" = COALESCE(:newTableName, \"CC_TABLE\"), " +
+          "\"CC_DATABASE\" = COALESCE(:newDbName, \"CC_DATABASE\") WHERE " +
+          "(\"CC_PARTITION\" = :oldPartName OR :oldPartName IS NULL) AND " +
+          "(\"CC_TABLE\" = :oldTableName OR :oldTableName IS NULL) AND " +
+          "(\"CC_DATABASE\" = :oldDbName OR :oldDbName IS NULL)",
+      "UPDATE \"WRITE_SET\" SET " +
+          "\"WS_PARTITION\" = COALESCE(:newPartName, \"WS_PARTITION\"), " +
+          "\"WS_TABLE\" = COALESCE(:newTableName, \"WS_TABLE\"), " +
+          "\"WS_DATABASE\" = COALESCE(:newDbName, \"WS_DATABASE\") WHERE " +
+          "(\"WS_PARTITION\" = :oldPartName OR :oldPartName IS NULL) AND " +
+          "(\"WS_TABLE\" = :oldTableName OR :oldTableName IS NULL) AND " +
+          "(\"WS_DATABASE\" = :oldDbName OR :oldDbName IS NULL)",
+      "UPDATE \"TXN_TO_WRITE_ID\" SET " +
+          "\"T2W_TABLE\" = COALESCE(:newTableName, \"T2W_TABLE\"), " +
+          "\"T2W_DATABASE\" = COALESCE(:newDbName, \"T2W_DATABASE\") WHERE " +
+          "(\"T2W_TABLE\" = :oldTableName OR :oldTableName IS NULL) AND " +
+          "(\"T2W_DATABASE\" = :oldDbName OR :oldDbName IS NULL)",
+      "UPDATE \"NEXT_WRITE_ID\" SET " +
+          "\"NWI_TABLE\" = COALESCE(:newTableName, \"NWI_TABLE\"), " +
+          "\"NWI_DATABASE\" = COALESCE(:newDbName, \"NWI_DATABASE\") WHERE " +
+          "(\"NWI_TABLE\" = :oldTableName OR :oldTableName IS NULL) AND " +
+          "(\"NWI_DATABASE\" = :oldDbName OR :oldDbName IS NULL)",
+      "UPDATE \"COMPACTION_METRICS_CACHE\" SET " +
+          "\"CMC_PARTITION\" = COALESCE(:newPartName, \"CMC_PARTITION\"), " +
+          "\"CMC_TABLE\" = COALESCE(:newTableName, \"CMC_TABLE\"), " +
+          "\"CMC_DATABASE\" = COALESCE(:newDbName, \"CMC_DATABASE\") WHERE " +
+          "(\"CMC_PARTITION\" = :oldPartName OR :oldPartName IS NULL) AND " +
+          "(\"CMC_TABLE\" = :oldTableName OR :oldTableName IS NULL) AND " +
+          "(\"CMC_DATABASE\" = :oldDbName OR :oldDbName IS NULL)",
+  };
+
+  private final String oldCatName;
+  private final String oldDbName;
+  private final String oldTabName;
+  private final String oldPartName;
+  private final String newCatName;
+  private final String newDbName;
+  private final String newTabName;
+  private final String newPartName;
+
+  public OnRenameFunction(String oldCatName, String oldDbName, String oldTabName, String oldPartName, 
+                          String newCatName, String newDbName, String newTabName, String newPartName) {
+    this.oldCatName = oldCatName;
+    this.oldDbName = StringUtils.lowerCase(oldDbName);
+    this.oldTabName = StringUtils.lowerCase(oldTabName);
+    this.oldPartName = oldPartName;
+    this.newCatName = newCatName;
+    this.newDbName = StringUtils.lowerCase(newDbName);
+    this.newTabName = StringUtils.lowerCase(newTabName);
+    this.newPartName = newPartName;
+  }
+
+  @SuppressWarnings("squid:S2259")
+  @Override
+  public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    String callSig = "onRename(" +
+        oldCatName + "," + oldDbName + "," + oldTabName + "," + oldPartName + "," +
+        newCatName + "," + newDbName + "," + newTabName + "," + newPartName + ")";
+
+    if (newPartName != null) {
+      assert oldPartName != null && oldTabName != null && oldDbName != null && oldCatName != null : callSig;
+    }
+    if (newTabName != null) {
+      assert oldTabName != null && oldDbName != null && oldCatName != null : callSig;
+    }
+    if (newDbName != null) {
+      assert oldDbName != null && oldCatName != null : callSig;
+    }
+
+    MapSqlParameterSource paramSource = new MapSqlParameterSource()
+        .addValue("oldDbName", oldDbName, Types.VARCHAR)
+        .addValue("newDbName", newDbName, Types.VARCHAR)
+        .addValue("oldTableName", oldTabName, Types.VARCHAR)
+        .addValue("newTableName", newTabName, Types.VARCHAR)
+        .addValue("oldPartName", oldPartName, Types.VARCHAR)
+        .addValue("newPartName", newPartName, Types.VARCHAR);
+    try {
+      for (String command : UPDATE_COMMANNDS) {
+        jdbcResource.getJdbcTemplate().update(command, paramSource);
+      }
+    } catch (DataAccessException e) {
+      //TODO: this seems to be very hacky, and as a result retry attempts won't happen, because DataAccessExceptions are
+      // caught and either swallowed or wrapped in MetaException. Also, only a single test fails without this block:
+      // org.apache.hadoop.hive.metastore.client.TestDatabases.testAlterDatabaseNotNullableFields
+      // It may be worth investigating whether this catch block is really needed.
+      if (e.getMessage() != null && e.getMessage().contains("does not exist")) {
+        LOG.warn("Cannot perform {} since metastore table does not exist", callSig);
+      } else {
+        throw new MetaException("Unable to " + callSig + ":" + org.apache.hadoop.util.StringUtils.stringifyException(e));
+      }
+    }
+    return null;
+  }
+
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/OpenTxnsFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/OpenTxnsFunction.java
new file mode 100644
index 000000000000..e677309799a0
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/OpenTxnsFunction.java
@@ -0,0 +1,233 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.MetaStoreListenerNotifier;
+import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
+import org.apache.hadoop.hive.metastore.api.TxnType;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.events.OpenTxnEvent;
+import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+import org.apache.hadoop.hive.metastore.txn.TxnHandler;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.TargetTxnIdListHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.jdbc.UncategorizedSQLException;
+import org.springframework.jdbc.core.JdbcTemplate;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;
+
+import java.sql.Connection;
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+import static org.apache.hadoop.hive.metastore.txn.TxnUtils.getEpochFn;
+
+public class OpenTxnsFunction implements TransactionalFunction<List<Long>> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(OpenTxnsFunction.class);
+
+  private static final String TXN_TMP_STATE = "_";
+  private static final String TXNS_INSERT_QRY = "INSERT INTO \"TXNS\" " +
+      "(\"TXN_STATE\", \"TXN_STARTED\", \"TXN_LAST_HEARTBEAT\", \"TXN_USER\", \"TXN_HOST\", \"TXN_TYPE\") " +
+      "VALUES(?,%s,%s,?,?,?)";
+
+  private final OpenTxnRequest rqst;
+  private final long openTxnTimeOutMillis;
+  private final List<TransactionalMetaStoreEventListener> transactionalListeners;
+
+  public OpenTxnsFunction(OpenTxnRequest rqst, long openTxnTimeOutMillis, 
+                          List<TransactionalMetaStoreEventListener> transactionalListeners) {
+    this.rqst = rqst;
+    this.openTxnTimeOutMillis = openTxnTimeOutMillis;
+    this.transactionalListeners = transactionalListeners;
+  }
+
+  @Override
+  public List<Long> execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    DatabaseProduct dbProduct = jdbcResource.getDatabaseProduct();
+    int numTxns = rqst.getNum_txns();
+    // Make sure the user has not requested an insane amount of txns.
+    int maxTxns = MetastoreConf.getIntVar(jdbcResource.getConf(), MetastoreConf.ConfVars.TXN_MAX_OPEN_BATCH);
+    if (numTxns > maxTxns) {
+      numTxns = maxTxns;
+    }
+    List<PreparedStatement> insertPreparedStmts = null;
+    TxnType txnType = rqst.isSetTxn_type() ? rqst.getTxn_type() : TxnType.DEFAULT;
+    boolean isReplayedReplTxn = txnType == TxnType.REPL_CREATED;
+    boolean isHiveReplTxn = rqst.isSetReplPolicy() && txnType == TxnType.DEFAULT;
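+    // For replayed replication txns the source-to-target mapping may already exist in REPL_TXN_MAP
+    // (e.g. from an earlier replay attempt); reusing the mapped target txn ids keeps the replay idempotent.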
+    if (isReplayedReplTxn) {
+      assert rqst.isSetReplPolicy();
+      List<Long> targetTxnIdList = jdbcResource.execute(new TargetTxnIdListHandler(rqst.getReplPolicy(), rqst.getReplSrcTxnIds()));
+
+      if (!targetTxnIdList.isEmpty()) {
+        if (targetTxnIdList.size() != rqst.getReplSrcTxnIds().size()) {
+          LOG.warn("target txn id number {} is not matching with source txn id number {}",
+              targetTxnIdList, rqst.getReplSrcTxnIds());
+        }
+        LOG.info("Target transactions {} are present for repl policy : {} and Source transaction id : {}",
+            targetTxnIdList, rqst.getReplPolicy(), rqst.getReplSrcTxnIds().toString());
+        return targetTxnIdList;
+      }
+    }
+
+    long minOpenTxnId = 0;
+    if (TxnHandler.ConfVars.useMinHistoryLevel()) {
+      minOpenTxnId = new MinOpenTxnIdWaterMarkFunction(openTxnTimeOutMillis).execute(jdbcResource);
+    }
+
+    List<Long> txnIds = new ArrayList<>(numTxns);
+    /*
+     * getGeneratedKeys is not supported by every DBMS after executing a multi-row insert,
+     * but it is supported by every DBMS we use for a single-row insert, even if the metadata says otherwise.
+     * If getGeneratedKeys is not supported, the rows are first inserted with a temporary TXN_STATE,
+     * then the generated ids are selected back by that state and the state is flipped to OPEN.
+     */
+    boolean genKeySupport = dbProduct.supportsGetGeneratedKeys();
+    genKeySupport = genKeySupport || (numTxns == 1);
+
+    String insertQuery = String.format(TXNS_INSERT_QRY, getEpochFn(dbProduct), getEpochFn(dbProduct));
+    LOG.debug("Going to execute insert <{}>", insertQuery);
+
+    Connection dbConn = jdbcResource.getConnection();
+    NamedParameterJdbcTemplate namedParameterJdbcTemplate = jdbcResource.getJdbcTemplate();
+    int maxBatchSize = MetastoreConf.getIntVar(jdbcResource.getConf(), MetastoreConf.ConfVars.JDBC_MAX_BATCH_SIZE);
+    try (PreparedStatement ps = dbConn.prepareStatement(insertQuery, new String[]{ "TXN_ID" })) {
+      String state = genKeySupport ? TxnStatus.OPEN.getSqlConst() : TXN_TMP_STATE;
+      if (numTxns == 1) {
+        ps.setString(1, state);
+        ps.setString(2, rqst.getUser());
+        ps.setString(3, rqst.getHostname());
+        ps.setInt(4, txnType.getValue());
+        txnIds.addAll(executeTxnInsertBatchAndExtractGeneratedKeys(namedParameterJdbcTemplate, true, ps, false));
+      } else {
+        for (int i = 0; i < numTxns; ++i) {
+          ps.setString(1, state);
+          ps.setString(2, rqst.getUser());
+          ps.setString(3, rqst.getHostname());
+          ps.setInt(4, txnType.getValue());
+          ps.addBatch();
+
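+          // Flush the accumulated rows every maxBatchSize inserts and collect the generated txn ids.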
+          if ((i + 1) % maxBatchSize == 0) {
+            txnIds.addAll(executeTxnInsertBatchAndExtractGeneratedKeys(namedParameterJdbcTemplate, genKeySupport, ps, true));
+          }
+        }
+        if (numTxns % maxBatchSize != 0) {
+          txnIds.addAll(executeTxnInsertBatchAndExtractGeneratedKeys(namedParameterJdbcTemplate, genKeySupport, ps, true));
+        }
+      }
+    } catch (SQLException e) {
+      throw new UncategorizedSQLException(null, null, e);
+    }
+
+    assert txnIds.size() == numTxns;
+
+    addTxnToMinHistoryLevel(jdbcResource.getJdbcTemplate().getJdbcTemplate(), maxBatchSize, txnIds, minOpenTxnId);
+
+    if (isReplayedReplTxn) {
+      List<String> rowsRepl = new ArrayList<>(numTxns);
+      List<String> params = Collections.singletonList(rqst.getReplPolicy());
+      List<List<String>> paramsList = new ArrayList<>(numTxns);
+      for (int i = 0; i < numTxns; i++) {
+        rowsRepl.add("?," + rqst.getReplSrcTxnIds().get(i) + "," + txnIds.get(i));
+        paramsList.add(params);
+      }
+
+      try {
+        insertPreparedStmts = jdbcResource.getSqlGenerator().createInsertValuesPreparedStmt(dbConn,
+            "\"REPL_TXN_MAP\" (\"RTM_REPL_POLICY\", \"RTM_SRC_TXN_ID\", \"RTM_TARGET_TXN_ID\")", rowsRepl,
+            paramsList);
+        for (PreparedStatement pst : insertPreparedStmts) {
+          try (PreparedStatement ppst = pst) {
+            ppst.execute();
+          }
+        }
+      } catch (SQLException e) {
+        throw new UncategorizedSQLException(null, null, e);
+      }
+    }
+
+    if (transactionalListeners != null && !isHiveReplTxn) {
+      MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+          EventMessage.EventType.OPEN_TXN, new OpenTxnEvent(txnIds, txnType), dbConn, jdbcResource.getSqlGenerator());
+    }
+    return txnIds;
+  }
+
+  /**
+   * Add a min history level entry for each generated txn record.
+   * @param jdbcTemplate {@link JdbcTemplate} to use for command execution
+   * @param batchSize maximum number of rows per JDBC batch
+   * @param txnIds new transaction ids
+   * @param minOpenTxnId current minimum open transaction id
+   * @deprecated Remove this method when the MIN_HISTORY_LEVEL table is dropped
+   */
+  @Deprecated
+  private void addTxnToMinHistoryLevel(JdbcTemplate jdbcTemplate, int batchSize, List<Long> txnIds, long minOpenTxnId) {
+    if (!TxnHandler.ConfVars.useMinHistoryLevel()) {
+      return;
+    }
+    String sql = "INSERT INTO \"MIN_HISTORY_LEVEL\" (\"MHL_TXNID\", \"MHL_MIN_OPEN_TXNID\") VALUES(?, ?)";
+    LOG.debug("Going to execute insert batch: <{}>", sql);
+
+    jdbcTemplate.batchUpdate(sql, txnIds, batchSize, (ps, argument) -> {
+      ps.setLong(1, argument);
+      ps.setLong(2, minOpenTxnId);
+    });
+    
+    LOG.info("Added entries to MIN_HISTORY_LEVEL for current txns: ({}) with min_open_txn: {}", txnIds, minOpenTxnId);
+  }
+
+  private List<Long> executeTxnInsertBatchAndExtractGeneratedKeys(NamedParameterJdbcTemplate jdbcTemplate, boolean genKeySupport,
+                                                                  PreparedStatement ps, boolean batch) throws SQLException {
+    
+    List<Long> txnIds = new ArrayList<>();
+    if (batch) {
+      ps.executeBatch();
+    } else {
+      // For a slight performance advantage we do not use executeBatch when we only have one row
+      ps.execute();
+    }
+    if (genKeySupport) {
+      try (ResultSet generatedKeys = ps.getGeneratedKeys()) {
+        while (generatedKeys.next()) {
+          txnIds.add(generatedKeys.getLong(1));
+        }
+      }
+    } else {
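+      // Fallback when generated keys are unavailable: the new rows were inserted with a temporary
+      // state, so read their ids back by that state and then flip the state to OPEN.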
+      txnIds = jdbcTemplate.query("SELECT \"TXN_ID\" FROM \"TXNS\" WHERE \"TXN_STATE\" = :tmpState",
+          new MapSqlParameterSource().addValue("tmpState", TXN_TMP_STATE), (rs, rowNum) -> rs.getLong(1));
+
+      jdbcTemplate.update("UPDATE \"TXNS\" SET \"TXN_STATE\" = :newState WHERE \"TXN_STATE\" = :tmpState",
+          new MapSqlParameterSource()
+              .addValue("newState", TxnStatus.OPEN.getSqlConst())
+              .addValue("tmpState", TXN_TMP_STATE));
+    }
+    return txnIds;
+  }
+
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/PerformTimeoutsFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/PerformTimeoutsFunction.java
new file mode 100644
index 000000000000..8a7e9555fb1a
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/PerformTimeoutsFunction.java
@@ -0,0 +1,203 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.MetaStoreListenerNotifier;
+import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener;
+import org.apache.hadoop.hive.metastore.api.TxnType;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.events.AbortTxnEvent;
+import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+import org.apache.hadoop.hive.metastore.metrics.Metrics;
+import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionContext;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.GetTxnDbsUpdatedHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.TreeSet;
+
+import static org.apache.hadoop.hive.metastore.txn.TxnUtils.getEpochFn;
+
+public class PerformTimeoutsFunction implements TransactionalFunction<Void> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(PerformTimeoutsFunction.class);
+
+  private static final String SELECT_TIMED_OUT_LOCKS_QUERY = "SELECT DISTINCT \"HL_LOCK_EXT_ID\" FROM \"HIVE_LOCKS\" " +
+      "WHERE \"HL_LAST_HEARTBEAT\" < %s - :timeout AND \"HL_TXNID\" = 0";
+
+  public static int TIMED_OUT_TXN_ABORT_BATCH_SIZE = 50000;
+
+  private final long timeout;
+  private final long replicationTxnTimeout;
+  private final List<TransactionalMetaStoreEventListener> transactionalListeners;
+
+  public PerformTimeoutsFunction(long timeout, long replicationTxnTimeout, List<TransactionalMetaStoreEventListener> transactionalListeners) {
+    this.timeout = timeout;
+    this.replicationTxnTimeout = replicationTxnTimeout;
+    this.transactionalListeners = transactionalListeners;
+  }
+
+  @Override
+  public Void execute(MultiDataSourceJdbcResource jdbcResource) {
+    DatabaseProduct dbProduct = jdbcResource.getDatabaseProduct();
+    try {
+      //We currently commit after selecting the TXNS to abort.  So whether SERIALIZABLE or
+      //READ_COMMITTED, the effect is the same.  We could use FOR UPDATE on Select from TXNS
+      //and do the whole performTimeOuts() in a single huge transaction, but the only benefit
+      //would be to make sure someone cannot heartbeat one of these txns at the same time.
+      //The attempt to heartbeat would block and fail immediately after it's unblocked.
+      //With current (RC + multiple txns) implementation it is possible for someone to send
+      //heartbeat at the very end of the expiry interval, and just after the Select from TXNS
+      //is made, in which case heartbeat will succeed but txn will still be Aborted.
+      //Solving this corner case is not worth the perf penalty.  The client should heartbeat in a
+      //timely way.
+      timeOutLocks(jdbcResource, dbProduct);
+      while (true) {
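+        // Replication-created txns are timed out against their own (typically longer) timeout,
+        // regular txns against the standard heartbeat timeout.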
+        String s = " \"TXN_ID\", \"TXN_TYPE\" FROM \"TXNS\" WHERE \"TXN_STATE\" = " + TxnStatus.OPEN +
+            " AND (" +
+            "\"TXN_TYPE\" != " + TxnType.REPL_CREATED.getValue() +
+            " AND \"TXN_LAST_HEARTBEAT\" <  " + getEpochFn(dbProduct) + "-" + timeout +
+            " OR " +
+            " \"TXN_TYPE\" = " + TxnType.REPL_CREATED.getValue() +
+            " AND \"TXN_LAST_HEARTBEAT\" <  " + getEpochFn(dbProduct) + "-" + replicationTxnTimeout +
+            ")";
+        //safety valve for extreme cases
+        s = jdbcResource.getSqlGenerator().addLimitClause(10 * TIMED_OUT_TXN_ABORT_BATCH_SIZE, s);
+
+        LOG.debug("Going to execute query <{}>", s);
+        List<Map<Long, TxnType>> timedOutTxns = Objects.requireNonNull(jdbcResource.getJdbcTemplate().query(s, rs -> {
+          List<Map<Long, TxnType>> txnbatch = new ArrayList<>();
+          Map<Long, TxnType> currentBatch = new HashMap<>(TIMED_OUT_TXN_ABORT_BATCH_SIZE);
+          while (rs.next()) {
+            currentBatch.put(rs.getLong(1),TxnType.findByValue(rs.getInt(2)));
+            if (currentBatch.size() == TIMED_OUT_TXN_ABORT_BATCH_SIZE) {
+              txnbatch.add(currentBatch);
+              currentBatch = new HashMap<>(TIMED_OUT_TXN_ABORT_BATCH_SIZE);
+            }
+          }
+          if (!currentBatch.isEmpty()) {
+            txnbatch.add(currentBatch);
+          }
+          return txnbatch;
+        }), "This never should be null, it's just to suppress warnings");
+        if (timedOutTxns.isEmpty()) {
+          return null;
+        }
+
+        TransactionContext context = jdbcResource.getTransactionManager().getActiveTransaction();
+        Object savePoint = context.createSavepoint();
+
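+        // A savepoint per batch lets us roll back only a batch that could not be fully aborted
+        // (e.g. due to a concurrent commit/abort/heartbeat) while keeping the batches aborted so far.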
+        int numTxnsAborted = 0;
+        for (Map<Long, TxnType> batchToAbort : timedOutTxns) {
+          context.releaseSavepoint(savePoint);
+          savePoint = context.createSavepoint();
+          int abortedTxns = new AbortTxnsFunction(new ArrayList<>(batchToAbort.keySet()), true, false, false, 
+              TxnErrorMsg.ABORT_TIMEOUT).execute(jdbcResource);
+          
+          if (abortedTxns == batchToAbort.size()) {
+            numTxnsAborted += batchToAbort.size();
+            //todo: add TXNS.COMMENT field and set it to 'aborted by system due to timeout'
+            LOG.info("Aborted the following transactions due to timeout: {}", batchToAbort);
+            if (transactionalListeners != null) {
+              for (Map.Entry<Long, TxnType> txnEntry : batchToAbort.entrySet()) {
+                List<String> dbsUpdated = jdbcResource.execute(new GetTxnDbsUpdatedHandler(txnEntry.getKey()));
+                MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                    EventMessage.EventType.ABORT_TXN,
+                    new AbortTxnEvent(txnEntry.getKey(), txnEntry.getValue(), null, dbsUpdated),
+                    jdbcResource.getConnection(), jdbcResource.getSqlGenerator());
+              }
+              LOG.debug("Added Notifications for the transactions that are aborted due to timeout: {}", batchToAbort);
+            }
+          } else {
+            //could not abort all txns in this batch - this may happen because in parallel with this
+            //operation there was activity on one of the txns in this batch (commit/abort/heartbeat)
+            //This is not likely but may happen if client experiences long pause between heartbeats or
+            //unusually long/extreme pauses between heartbeat() calls and other logic in checkLock(),
+            //lock(), etc.
+            context.rollbackToSavepoint(savePoint);
+          }
+        }
+        LOG.info("Aborted {} transaction(s) due to timeout", numTxnsAborted);
+        if (MetastoreConf.getBoolVar(jdbcResource.getConf(), MetastoreConf.ConfVars.METASTORE_ACIDMETRICS_EXT_ON)) {
+          Metrics.getOrCreateCounter(MetricsConstants.TOTAL_NUM_TIMED_OUT_TXNS).inc(numTxnsAborted);
+        }
+      }
+    } catch (Exception e) {
+      LOG.warn("Aborting timed out transactions failed due to " + e.getMessage(), e);
+    }
+    return null;
+  }
+
+  // Clean timed-out locks from the database that are not associated with a transaction, i.e. locks
+  // for read-only autoCommit=true statements.  This does a commit,
+  // and thus should be done before any calls to heartbeat that will leave
+  // open transactions.
+  private void timeOutLocks(MultiDataSourceJdbcResource jdbcResource, DatabaseProduct dbProduct) {
+    //doing a SELECT first is less efficient but makes it easier to debug things
+    //when txnid is <> 0, the lock is associated with a txn and is handled by performTimeOuts()
+    //want to avoid expiring locks for a txn w/o expiring the txn itself
+    try {
+      Set<Long> timedOutLockIds = new TreeSet<>(
+          jdbcResource.getJdbcTemplate().query(String.format(SELECT_TIMED_OUT_LOCKS_QUERY, getEpochFn(dbProduct)),
+              new MapSqlParameterSource().addValue("timeout", timeout),
+              (rs, rowNum) -> rs.getLong(1)));
+      if (timedOutLockIds.isEmpty()) {
+        LOG.debug("Did not find any timed-out locks, therefore retuning.");
+        return;
+      }
+
+      List<String> queries = new ArrayList<>();
+      StringBuilder prefix = new StringBuilder();
+      StringBuilder suffix = new StringBuilder();
+
+      //include same hl_last_heartbeat condition in case someone heartbeated since the select
+      prefix.append("DELETE FROM \"HIVE_LOCKS\" WHERE \"HL_LAST_HEARTBEAT\" < ");
+      prefix.append(getEpochFn(dbProduct)).append("-").append(timeout);
+      prefix.append(" AND \"HL_TXNID\" = 0 AND ");
+
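+      // buildQueryWithINClause splits the DELETE into several statements when the id list
+      // exceeds the configured maximum IN-clause size.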
+      TxnUtils.buildQueryWithINClause(jdbcResource.getConf(), queries, prefix, suffix, timedOutLockIds,
+          "\"HL_LOCK_EXT_ID\"", true, false);
+
+      int deletedLocks = 0;
+      for (String query : queries) {
+        LOG.debug("Going to execute update: <{}>", query);
+        deletedLocks += jdbcResource.getJdbcTemplate().update(query, new MapSqlParameterSource());
+      }
+      if (deletedLocks > 0) {
+        LOG.info("Deleted {} locks due to timed-out. Lock ids: {}", deletedLocks, timedOutLockIds);
+      }
+    } catch (Exception ex) {
+      LOG.error("Failed to purge timed-out locks: " + ex.getMessage(), ex);
+    }
+  }
+  
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/PurgeCompactionHistoryFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/PurgeCompactionHistoryFunction.java
similarity index 93%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/PurgeCompactionHistoryFunction.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/PurgeCompactionHistoryFunction.java
index f1402ed899cf..1a5888499643 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/PurgeCompactionHistoryFunction.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/PurgeCompactionHistoryFunction.java
@@ -15,14 +15,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.api.CompactionType;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.InClauseBatchCommand;
 import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
 import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
 import org.slf4j.Logger;
@@ -43,20 +44,15 @@ public class PurgeCompactionHistoryFunction implements TransactionalFunction<Void> {
     List<Long> deleteSet = new ArrayList<>();
     long timeoutThreshold = System.currentTimeMillis() -
         MetastoreConf.getTimeVar(conf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_TIMEOUT, TimeUnit.MILLISECONDS);
     int didNotInitiateRetention = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_DID_NOT_INITIATE);
-    int failedRetention = getFailedCompactionRetention();
+    int failedRetention = getFailedCompactionRetention(conf);
     int succeededRetention = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_SUCCEEDED);
     int refusedRetention = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_REFUSED);
         /* cc_id is monotonically increasing so for any entity sorts in order of compaction history,
@@ -90,13 +86,12 @@ public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaExcepti
       return null;
     }
 
-    int totalCount = TxnUtils.executeStatementWithInClause(conf, jdbcTemplate,
+    int totalCount = jdbcResource.execute(new InClauseBatchCommand<>(
         "DELETE FROM \"COMPLETED_COMPACTIONS\" WHERE \"CC_ID\" in (:ids)",
-        new MapSqlParameterSource(), "ids", deleteSet, Long::compareTo);
+        new MapSqlParameterSource().addValue("ids", deleteSet), "ids", Long::compareTo));
     LOG.debug("Removed {} records from COMPLETED_COMPACTIONS", totalCount);
     return null;
-  }
-  
+  }  
 
   private void checkForDeletion(List deleteSet, CompactionInfo ci, RetentionCounters rc, long timeoutThreshold) {
     switch (ci.state) {
@@ -139,7 +134,7 @@ private static boolean timedOut(CompactionInfo ci, RetentionCounters rc, long pa
    * this ensures that the number of failed compaction entries retained is > than number of failed
    * compaction threshold which prevents new compactions from being scheduled.
    */
-  private int getFailedCompactionRetention() {
+  private int getFailedCompactionRetention(Configuration conf) {
     int failedThreshold = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
     int failedRetention = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED);
     if(failedRetention < failedThreshold) {
@@ -167,5 +162,5 @@ private static class RetentionCounters {
       this.refusedRetention = refusedRetention;
     }
   }
-
+  
 }
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/ReleaseMaterializationRebuildLocks.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/ReleaseMaterializationRebuildLocks.java
new file mode 100644
index 000000000000..86f1af05e6ad
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/ReleaseMaterializationRebuildLocks.java
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.common.ValidTxnList;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.List;
+
+public class ReleaseMaterializationRebuildLocks implements TransactionalFunction<Long> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ReleaseMaterializationRebuildLocks.class);
+
+  private final ValidTxnList validTxnList;
+  private final long timeout;
+
+  public ReleaseMaterializationRebuildLocks(ValidTxnList validTxnList, long timeout) {
+    this.validTxnList = validTxnList;
+    this.timeout = timeout;
+  }
+
+  @Override
+  public Long execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    // Aux values
+    long cnt = 0L;
+    List<Long> txnIds = new ArrayList<>();
+    long timeoutTime = Instant.now().toEpochMilli() - timeout;
+
+
+    String selectQ = "SELECT \"MRL_TXN_ID\", \"MRL_LAST_HEARTBEAT\" FROM \"MATERIALIZATION_REBUILD_LOCKS\"";
+    LOG.debug("Going to execute query <{}>", selectQ);
+
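+    // Only locks whose heartbeat has expired AND whose owning txn is already resolved
+    // (committed or aborted according to validTxnList) are collected for removal below.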
+    jdbcResource.getJdbcTemplate().query(selectQ, rs -> {
+      while (rs.next()) {
+        long lastHeartbeat = rs.getLong(2);
+        if (lastHeartbeat < timeoutTime) {
+          // The heartbeat has timed out; double check whether we can remove it
+          long txnId = rs.getLong(1);
+          if (validTxnList.isTxnValid(txnId) || validTxnList.isTxnAborted(txnId)) {
+            // Txn was committed (but notification was not received) or it was aborted.
+            // Either case, we can clean it up
+            txnIds.add(txnId);
+          }
+        }
+      }
+      return null;
+    });
+
+    if (!txnIds.isEmpty()) {
+      String deleteQ = "DELETE FROM \"MATERIALIZATION_REBUILD_LOCKS\" WHERE \"MRL_TXN_ID\" IN(:txnIds)";
+      LOG.debug("Going to execute update <{}>", deleteQ);
+      cnt = jdbcResource.getJdbcTemplate().update(deleteQ, new MapSqlParameterSource().addValue("txnIds", txnIds));
+    }
+    LOG.debug("Going to commit");
+    return cnt;
+  }
+
+}
\ No newline at end of file
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/ReplTableWriteIdStateFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/ReplTableWriteIdStateFunction.java
new file mode 100644
index 000000000000..f369bb35f12f
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/ReplTableWriteIdStateFunction.java
@@ -0,0 +1,158 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
+import org.apache.hadoop.hive.common.ValidWriteIdList;
+import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener;
+import org.apache.hadoop.hive.metastore.api.CompactionRequest;
+import org.apache.hadoop.hive.metastore.api.CompactionType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;
+import org.apache.hadoop.hive.metastore.api.ReplTblWriteIdStateRequest;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.txn.TxnErrorMsg;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;
+
+import java.sql.PreparedStatement;
+import java.sql.ResultSet;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+public class ReplTableWriteIdStateFunction implements TransactionalFunction<Void> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(ReplTableWriteIdStateFunction.class);
+  
+  private final ReplTblWriteIdStateRequest rqst;
+  private final TxnStore.MutexAPI mutexAPI;
+  private final List<TransactionalMetaStoreEventListener> transactionalListeners;
+
+  public ReplTableWriteIdStateFunction(ReplTblWriteIdStateRequest rqst, TxnStore.MutexAPI mutexAPI, List<TransactionalMetaStoreEventListener> transactionalListeners) {
+    this.rqst = rqst;
+    this.mutexAPI = mutexAPI;
+    this.transactionalListeners = transactionalListeners;
+  }
+
+  @Override
+  public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    long openTxnTimeOutMillis = MetastoreConf.getTimeVar(jdbcResource.getConf(), MetastoreConf.ConfVars.TXN_OPENTXN_TIMEOUT, TimeUnit.MILLISECONDS);
+
+    String dbName = rqst.getDbName().toLowerCase();
+    String tblName = rqst.getTableName().toLowerCase();
+    ValidWriteIdList validWriteIdList = new ValidReaderWriteIdList(rqst.getValidWriteIdlist());
+
+    NamedParameterJdbcTemplate npjdbcTemplate = jdbcResource.getJdbcTemplate();
+    // Check if this txn state is already replicated for this given table. If yes, then this is
+    // an idempotent replay and we just return.
+    boolean found = Boolean.TRUE.equals(npjdbcTemplate.query(
+        "SELECT \"NWI_NEXT\" FROM \"NEXT_WRITE_ID\" WHERE \"NWI_DATABASE\" = :dbName AND \"NWI_TABLE\" = :tableName", 
+        new MapSqlParameterSource()
+            .addValue("dbName", dbName)
+            .addValue("tableName", tblName),
+        ResultSet::next
+    ));
+
+    if (found) {
+      LOG.info("Idempotent flow: WriteId state <{}> is already applied for the table: {}.{}",
+          validWriteIdList, dbName, tblName);
+      return null;
+    }
+
+    // Get the abortedWriteIds which are already sorted in ascending order.
+    List<Long> abortedWriteIds = getAbortedWriteIds(validWriteIdList);
+    int numAbortedWrites = abortedWriteIds.size();
+    if (numAbortedWrites > 0) {
+      // Allocate/Map one txn per aborted writeId and abort the txn to mark writeid as aborted.
+      // We don't use the txnLock; all of these transactions will be aborted in this one RDBMS transaction,
+      // so they will not affect commitTxn in any way.
+
+      List<Long> txnIds = new OpenTxnsFunction(
+          new OpenTxnRequest(numAbortedWrites, rqst.getUser(), rqst.getHostName()),
+          openTxnTimeOutMillis, transactionalListeners).execute(jdbcResource);
+      assert (numAbortedWrites == txnIds.size());
+
+      // Map each aborted write id with each allocated txn.
+      List<Object[]> params = new ArrayList<>(txnIds.size());
+      for (int i = 0; i < txnIds.size(); i++) {
+        params.add(new Object[] {txnIds.get(i), dbName, tblName, abortedWriteIds.get(i)});
+        LOG.info("Allocated writeID: {} for txnId: {}", abortedWriteIds.get(i), txnIds.get(i));
+      }
+      
+      int maxBatchSize = MetastoreConf.getIntVar(jdbcResource.getConf(), MetastoreConf.ConfVars.JDBC_MAX_BATCH_SIZE);
+      jdbcResource.getJdbcTemplate().getJdbcTemplate().batchUpdate(
+          "INSERT INTO \"TXN_TO_WRITE_ID\" (\"T2W_TXNID\", \"T2W_DATABASE\", \"T2W_TABLE\", \"T2W_WRITEID\") VALUES (?, ?, ?, ?)",
+          params, maxBatchSize, (PreparedStatement ps, Object[] statementParams) -> {
+            ps.setLong(1, (Long)statementParams[0]);
+            ps.setString(2, statementParams[1].toString());
+            ps.setString(3, statementParams[2].toString());
+            ps.setLong(4, (Long)statementParams[3]);
+          });
+
+      // Abort all the allocated txns so that the mapped write ids are referred as aborted ones.
+      int numAborts = new AbortTxnsFunction(txnIds, false, false,false,
+          TxnErrorMsg.ABORT_REPL_WRITEID_TXN).execute(jdbcResource);
+      assert (numAborts == numAbortedWrites);
+    }
+
+    // There are some txns in the list which have no write id allocated, so go ahead and allocate it.
+    // Get the next write id for the given table and update it with the new next write id.
+    // NEXT_WRITE_ID is not expected to have an entry for this table yet, hence we insert it directly.
+    long nextWriteId = validWriteIdList.getHighWatermark() + 1;
+
+    // First allocation of write id (hwm+1) should add the table to the next_write_id meta table.
+    npjdbcTemplate.update(
+        "INSERT INTO \"NEXT_WRITE_ID\" (\"NWI_DATABASE\", \"NWI_TABLE\", \"NWI_NEXT\") VALUES (:dbName, :tableName, :nextWriteId)",
+        new MapSqlParameterSource()
+            .addValue("dbName", dbName)
+            .addValue("tableName", tblName)
+            .addValue("nextWriteId", nextWriteId));
+    LOG.info("WriteId state <{}> is applied for the table: {}.{}", validWriteIdList, dbName, tblName);
+
+    // Schedule Major compaction on all the partitions/table to clean aborted data
+    if (numAbortedWrites > 0) {
+      CompactionRequest compactRqst = new CompactionRequest(rqst.getDbName(), rqst.getTableName(),
+          CompactionType.MAJOR);
+      if (rqst.isSetPartNames()) {
+        for (String partName : rqst.getPartNames()) {
+          compactRqst.setPartitionname(partName);
+          new CompactFunction(compactRqst, openTxnTimeOutMillis, mutexAPI).execute(jdbcResource);
+        }
+      } else {
+        new CompactFunction(compactRqst, openTxnTimeOutMillis, mutexAPI).execute(jdbcResource);
+      }
+    }
+    return null;
+  }
+
+  private List<Long> getAbortedWriteIds(ValidWriteIdList validWriteIdList) {
+    return Arrays.stream(validWriteIdList.getInvalidWriteIds())
+        .filter(validWriteIdList::isWriteIdAborted)
+        .boxed()
+        .collect(Collectors.toList());
+  }
+  
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/TopCompactionMetricsDataPerTypeFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/TopCompactionMetricsDataPerTypeFunction.java
similarity index 87%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/TopCompactionMetricsDataPerTypeFunction.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/TopCompactionMetricsDataPerTypeFunction.java
index 409212f8edb6..16cfcfecc679 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/TopCompactionMetricsDataPerTypeFunction.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/TopCompactionMetricsDataPerTypeFunction.java
@@ -15,11 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
 
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
-import org.apache.hadoop.hive.metastore.txn.CompactionMetricsData;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionMetricsData;
 import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
 import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
 import org.springframework.jdbc.core.RowMapper;
@@ -37,11 +36,9 @@ public class TopCompactionMetricsDataPerTypeFunction implements TransactionalFun
           "\"COMPACTION_METRICS_CACHE\" WHERE \"CMC_METRIC_TYPE\" = :type ORDER BY \"CMC_METRIC_VALUE\" DESC";
   
   private final int limit;
-  private final SQLGenerator sqlGenerator;
 
-  public TopCompactionMetricsDataPerTypeFunction(int limit, SQLGenerator sqlGenerator) {
+  public TopCompactionMetricsDataPerTypeFunction(int limit) {
     this.limit = limit;
-    this.sqlGenerator = sqlGenerator;
   }
 
   @Override
@@ -50,7 +47,7 @@ public List execute(MultiDataSourceJdbcResource jdbcResou
     List metricsDataList = new ArrayList<>();
     for (CompactionMetricsData.MetricType type : CompactionMetricsData.MetricType.values()) {
       metricsDataList.addAll(jdbcResource.getJdbcTemplate().query(
-          sqlGenerator.addLimitClause(limit, NO_SELECT_COMPACTION_METRICS_CACHE_FOR_TYPE_QUERY),
+          jdbcResource.getSqlGenerator().addLimitClause(limit, NO_SELECT_COMPACTION_METRICS_CACHE_FOR_TYPE_QUERY),
           new MapSqlParameterSource().addValue("type", type.toString()),
           new CompactionMetricsDataMapper(type)));
     }
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/UpdataDatabasePropFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/UpdataDatabasePropFunction.java
new file mode 100644
index 000000000000..23e9f7715bfa
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/UpdataDatabasePropFunction.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
+
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
+import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;
+
+public class UpdataDatabasePropFunction implements TransactionalFunction<Void> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(UpdataDatabasePropFunction.class);
+
+  private final String database;
+  private final long dbId;
+  private final String prop;
+  private final String propValue;
+
+  public UpdataDatabasePropFunction(String database, long dbId, String prop, String propValue) {
+    this.database = database;
+    this.dbId = dbId;
+    this.prop = prop;
+    this.propValue = propValue;
+  }
+
+  @Override
+  public Void execute(MultiDataSourceJdbcResource jdbcResource) throws MetaException {
+    MapSqlParameterSource params = new MapSqlParameterSource()
+        .addValue("dbId", dbId)
+        .addValue("key", prop)
+        .addValue("value", propValue);
+    NamedParameterJdbcTemplate jdbcTemplate = jdbcResource.getJdbcTemplate();
+
+    String value = jdbcTemplate.query("SELECT \"PARAM_VALUE\" FROM \"DATABASE_PARAMS\" " +
+        "WHERE \"PARAM_KEY\" = :key AND \"DB_ID\" = :dbId", params, rs -> rs.next() ? rs.getString("PARAM_VALUE") : null);
+
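+    // Upsert semantics: update when the key exists with a different value, insert when it is
+    // missing, and treat an already matching value as a no-op.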
+    int count = 1;
+    if (value != null) {
+      if (value.equals(propValue)) {
+        LOG.info("Database property: {} with value: {} already updated for db: {}", prop, propValue, database);
+      } else {
+        count = jdbcTemplate.update("UPDATE \"DATABASE_PARAMS\" SET \"PARAM_VALUE\" = :value WHERE \"DB_ID\" = :dbId AND " +
+            "\"PARAM_KEY\" = :key", params);
+      }
+    } else {
+      count = jdbcTemplate.update("INSERT INTO \"DATABASE_PARAMS\" VALUES (:dbId, :key, :value)", params);
+    }
+    if (count != 1) {
+      // exactly one row should have been inserted or updated
+      throw new RuntimeException("DATABASE_PARAMS is corrupted for database: " + database);
+    }
+    return null;
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/UpdateCompactionMetricsDataFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/UpdateCompactionMetricsDataFunction.java
similarity index 92%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/UpdateCompactionMetricsDataFunction.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/UpdateCompactionMetricsDataFunction.java
index e372041922cd..1f6ac60a6eb9 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/UpdateCompactionMetricsDataFunction.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/functions/UpdateCompactionMetricsDataFunction.java
@@ -15,10 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.functions;
 
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.txn.CompactionMetricsData;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionMetricsData;
+import org.apache.hadoop.hive.metastore.txn.jdbc.commands.RemoveCompactionMetricsDataCommand;
+import org.apache.hadoop.hive.metastore.txn.jdbc.queries.CompactionMetricsDataHandler;
 import org.apache.hadoop.hive.metastore.txn.jdbc.MultiDataSourceJdbcResource;
 import org.apache.hadoop.hive.metastore.txn.jdbc.TransactionalFunction;
 import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/AbortTxnInfoHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/AbortTxnInfoHandler.java
new file mode 100644
index 000000000000..cf7f50956d49
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/AbortTxnInfoHandler.java
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.CompactionType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.hadoop.hive.metastore.txn.TxnStore.READY_FOR_CLEANING;
+import static org.apache.hadoop.hive.metastore.txn.TxnUtils.getEpochFn;
+
+public class AbortTxnInfoHandler implements QueryHandler<List<CompactionInfo>> {
+
+  // Three inner sub-queries combined via LEFT JOINs to fetch the required data for aborted txns.
+  //language=SQL
+  private static final String SELECT_ABORTS_WITH_MIN_OPEN_WRITETXN_QUERY =
+      " \"res1\".\"TC_DATABASE\" AS \"DB\", \"res1\".\"TC_TABLE\" AS \"TBL\", \"res1\".\"TC_PARTITION\" AS \"PART\", " +
+          " \"res1\".\"MIN_TXN_START_TIME\" AS \"MIN_TXN_START_TIME\", \"res1\".\"ABORTED_TXN_COUNT\" AS \"ABORTED_TXN_COUNT\", " +
+          " \"res2\".\"MIN_OPEN_WRITE_TXNID\" AS \"MIN_OPEN_WRITE_TXNID\", \"res3\".\"RETRY_RETENTION\" AS \"RETRY_RETENTION\", " +
+          " \"res3\".\"ID\" AS \"RETRY_CQ_ID\" " +
+          " FROM " +
+          // First sub-query - Gets the aborted txns with min txn start time, number of aborted txns
+          // for corresponding db, table, partition.
+          " ( SELECT \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", MIN(\"TXN_STARTED\") AS \"MIN_TXN_START_TIME\", " +
+          " COUNT(*) AS \"ABORTED_TXN_COUNT\" FROM \"TXNS\", \"TXN_COMPONENTS\" " +
+          " WHERE \"TXN_ID\" = \"TC_TXNID\" AND \"TXN_STATE\" = :abortedState" +
+          " GROUP BY \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\" %s ) \"res1\" " +
+          " LEFT JOIN" +
+          // Second sub-query - Gets the min open txn id for corresponding db, table, partition.
+          "( SELECT \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", MIN(\"TC_TXNID\") AS \"MIN_OPEN_WRITE_TXNID\" " +
+          " FROM \"TXNS\", \"TXN_COMPONENTS\" " +
+          " WHERE \"TXN_ID\" = \"TC_TXNID\" AND \"TXN_STATE\" = :openState" +
+          " GROUP BY \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\" ) \"res2\"" +
+          " ON \"res1\".\"TC_DATABASE\" = \"res2\".\"TC_DATABASE\"" +
+          " AND \"res1\".\"TC_TABLE\" = \"res2\".\"TC_TABLE\"" +
+          " AND (\"res1\".\"TC_PARTITION\" = \"res2\".\"TC_PARTITION\" " +
+          " OR (\"res1\".\"TC_PARTITION\" IS NULL AND \"res2\".\"TC_PARTITION\" IS NULL)) " +
+          " LEFT JOIN " +
+          // Third sub-query - Gets the retry entries for corresponding db, table, partition.
+          "( SELECT \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", MAX(\"CQ_ID\") AS \"ID\", " +
+          " MAX(\"CQ_RETRY_RETENTION\") AS \"RETRY_RETENTION\", " +
+          " MIN(\"CQ_COMMIT_TIME\") - %s + MAX(\"CQ_RETRY_RETENTION\") AS \"RETRY_RECORD_CHECK\" FROM \"COMPACTION_QUEUE\" " +
+          " WHERE \"CQ_TYPE\" = :type" +
+          " GROUP BY \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\") \"res3\" " +
+          " ON \"res1\".\"TC_DATABASE\" = \"res3\".\"CQ_DATABASE\" " +
+          " AND \"res1\".\"TC_TABLE\" = \"res3\".\"CQ_TABLE\" " +
+          " AND (\"res1\".\"TC_PARTITION\" = \"res3\".\"CQ_PARTITION\" " +
+          " OR (\"res1\".\"TC_PARTITION\" IS NULL AND \"res3\".\"CQ_PARTITION\" IS NULL))" +
+          " WHERE \"res3\".\"RETRY_RECORD_CHECK\" <= 0 OR \"res3\".\"RETRY_RECORD_CHECK\" IS NULL";
+
+  private final long abortedTimeThreshold;
+  private final int abortedThreshold;
+  private final int fetchSize;
+  
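+  // When a time threshold is configured (>= 0) the count filter cannot be pushed into SQL,
+  // because a group may also qualify by age alone; extractData() re-applies both conditions.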
+  public String getParameterizedQueryString(DatabaseProduct dbProduct) throws MetaException {
+    return dbProduct.addLimitClause(
+        fetchSize,
+        String.format(AbortTxnInfoHandler.SELECT_ABORTS_WITH_MIN_OPEN_WRITETXN_QUERY,
+            abortedTimeThreshold >= 0 ? "" : " HAVING COUNT(*) > " + abortedThreshold, getEpochFn(dbProduct)));
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource()
+        .addValue("abortedState", TxnStatus.ABORTED.getSqlConst(), Types.CHAR)
+        .addValue("openState", TxnStatus.OPEN.getSqlConst(), Types.CHAR)
+        .addValue("type", Character.toString(TxnStore.ABORT_TXN_CLEANUP_TYPE), Types.CHAR);
+  }
+
+  @Override
+  public List<CompactionInfo> extractData(ResultSet rs) throws DataAccessException, SQLException {
+    List<CompactionInfo> readyToCleanAborts = new ArrayList<>();
+    long systemTime = System.currentTimeMillis();
+    boolean checkAbortedTimeThreshold = abortedTimeThreshold >= 0;
+    while (rs.next()) {
+      boolean pastTimeThreshold =
+          checkAbortedTimeThreshold && rs.getLong("MIN_TXN_START_TIME") + abortedTimeThreshold < systemTime;
+      int numAbortedTxns = rs.getInt("ABORTED_TXN_COUNT");
+      if (numAbortedTxns > abortedThreshold || pastTimeThreshold) {
+        CompactionInfo info = new CompactionInfo();
+        info.dbname = rs.getString("DB");
+        info.tableName = rs.getString("TBL");
+        info.partName = rs.getString("PART");
+        // In this case, this field contains min open write txn ID.
+        long value = rs.getLong("MIN_OPEN_WRITE_TXNID");
+        info.minOpenWriteTxnId = value > 0 ? value : Long.MAX_VALUE;
+        // The specific type, state assigned to abort cleanup.
+        info.type = CompactionType.ABORT_TXN_CLEANUP;
+        info.state = READY_FOR_CLEANING;
+        info.retryRetention = rs.getLong("RETRY_RETENTION");
+        info.id = rs.getLong("RETRY_CQ_ID");
+        readyToCleanAborts.add(info);
+      }
+    }
+    return readyToCleanAborts;
+  }
+
+  public AbortTxnInfoHandler(long abortedTimeThreshold, int abortedThreshold, int fetchSize) {
+    this.abortedTimeThreshold = abortedTimeThreshold;
+    this.abortedThreshold = abortedThreshold;
+    this.fetchSize = fetchSize;
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/AbortedTxnHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/AbortedTxnHandler.java
similarity index 94%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/AbortedTxnHandler.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/AbortedTxnHandler.java
index 10c27971daa1..f31b2d19d3a6 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/AbortedTxnHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/AbortedTxnHandler.java
@@ -15,12 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
 
 import org.apache.hadoop.hive.metastore.DatabaseProduct;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
-import org.apache.hadoop.hive.metastore.txn.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
 import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
 import org.springframework.dao.DataAccessException;
 import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/CheckFailedCompactionsHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CheckFailedCompactionsHandler.java
similarity index 96%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/CheckFailedCompactionsHandler.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CheckFailedCompactionsHandler.java
index 0041d4f734ba..b6a16cd09d19 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/CheckFailedCompactionsHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CheckFailedCompactionsHandler.java
@@ -15,13 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.DatabaseProduct;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
 import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
 import org.springframework.dao.DataAccessException;
 import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/CompactionCandidateHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CompactionCandidateHandler.java
similarity index 96%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/CompactionCandidateHandler.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CompactionCandidateHandler.java
index c5d056c78611..98158783277e 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/CompactionCandidateHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CompactionCandidateHandler.java
@@ -15,11 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
 
 import org.apache.hadoop.hive.metastore.DatabaseProduct;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
 import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
 import org.springframework.dao.DataAccessException;
 import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/CompactionMetricsDataHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CompactionMetricsDataHandler.java
similarity index 95%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/CompactionMetricsDataHandler.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CompactionMetricsDataHandler.java
index 8186cda0d395..1e112f4db1e6 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/CompactionMetricsDataHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CompactionMetricsDataHandler.java
@@ -15,11 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
 
 import org.apache.hadoop.hive.metastore.DatabaseProduct;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.txn.CompactionMetricsData;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionMetricsData;
 import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
 import org.springframework.dao.DataAccessException;
 import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CountOpenTxnsHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CountOpenTxnsHandler.java
new file mode 100644
index 000000000000..9f9458f2f1bd
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/CountOpenTxnsHandler.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+public class CountOpenTxnsHandler implements QueryHandler<Integer> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(CountOpenTxnsHandler.class);
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return "SELECT COUNT(*) FROM \"TXNS\" WHERE \"TXN_STATE\" = :state";
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource().addValue("state", TxnStatus.OPEN.getSqlConst());
+  }
+
+  @Override
+  public Integer extractData(ResultSet rs) throws SQLException, DataAccessException {
+    if (!rs.next()) {
+      LOG.error("Transaction database not properly configured, can't find txn_state from TXNS.");
+      return -1;
+    } else {
+      Long numOpen = rs.getLong(1);
+      if (numOpen > Integer.MAX_VALUE) {
+        LOG.error("Open transaction count above {}, can't count that high!", Integer.MAX_VALUE);
+        return -1;
+      } else {
+        return numOpen.intValue();
+      }
+    }
+  }
+  
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/DbTimeHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/DbTimeHandler.java
new file mode 100644
index 000000000000..12f17b2c67dc
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/DbTimeHandler.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.dao.EmptyResultDataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Timestamp;
+
+public class DbTimeHandler implements QueryHandler<Timestamp> {
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return databaseProduct.getDBTime();
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource();
+  }
+
+  @Override
+  public Timestamp extractData(ResultSet rs) throws SQLException, DataAccessException {
+    if (rs.next()) {
+      return rs.getTimestamp(1);
+    } else {
+      throw new EmptyResultDataAccessException("Could not obtain DB time", 1);
+    }
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/FindColumnsWithStatsHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/FindColumnsWithStatsHandler.java
similarity index 95%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/FindColumnsWithStatsHandler.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/FindColumnsWithStatsHandler.java
index 82a16e3e69e7..e20d429e25f2 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/FindColumnsWithStatsHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/FindColumnsWithStatsHandler.java
@@ -15,10 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
 
 import org.apache.hadoop.hive.metastore.DatabaseProduct;
-import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
 import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/FindTxnStateHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/FindTxnStateHandler.java
new file mode 100644
index 000000000000..a5490af57c1a
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/FindTxnStateHandler.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+public class FindTxnStateHandler implements QueryHandler<TxnStatus> {
+  
+  private final long txnId;
+
+  public FindTxnStateHandler(long txnId) {
+    this.txnId = txnId;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return 
+        "SELECT s.STATE from (" +
+          "SELECT '1_txns' AS SOURCE, \"TXN_STATE\" AS STATE FROM \"TXNS\" WHERE \"TXN_ID\" = :txnId "+
+          "UNION " +
+          "SELECT '2_txn_compontents' AS SOURCE, 'c' AS STATE FROM \"COMPLETED_TXN_COMPONENTS\" WHERE \"CTC_TXNID\" = :txnId) s " +
+        "ORDER BY s.SOURCE";
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource().addValue("txnId", txnId);
+  }
+
+  @Override
+  public TxnStatus extractData(ResultSet rs) throws SQLException, DataAccessException {
+    if (rs.next()) {
+        return TxnStatus.fromString(rs.getString("STATE")); 
+    } else {
+      return TxnStatus.UNKNOWN;
+    }
+  }
+
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/GetCompactionInfoHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetCompactionInfoHandler.java
similarity index 96%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/GetCompactionInfoHandler.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetCompactionInfoHandler.java
index 916f1c877ffb..7da6d9d377e0 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/GetCompactionInfoHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetCompactionInfoHandler.java
@@ -15,11 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
 
 import org.apache.hadoop.hive.metastore.DatabaseProduct;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
 import org.springframework.dao.DataAccessException;
@@ -34,7 +34,7 @@ public class GetCompactionInfoHandler implements QueryHandler<CompactionInfo> {
   private final long id;
   private final boolean isTransactionId;
 
-  // language=PostgreSQL
+  // language=SQL
   public static final String SELECT_BY_ID = 
       "SELECT \"CQ_ID\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", "
       + "\"CQ_STATE\", \"CQ_TYPE\", \"CQ_TBLPROPERTIES\", \"CQ_WORKER_ID\", \"CQ_START\", \"CQ_RUN_AS\", "
@@ -43,7 +43,7 @@ public class GetCompactionInfoHandler implements QueryHandler<CompactionInfo> {
       + "\"CQ_RETRY_RETENTION\", \"CQ_NEXT_TXN_ID\", \"CQ_TXN_ID\", \"CQ_COMMIT_TIME\", \"CQ_POOL_NAME\", "
       + "\"CQ_NUMBER_OF_BUCKETS\", \"CQ_ORDER_BY\" FROM \"COMPACTION_QUEUE\" WHERE \"CQ_ID\" = :id";
 
-  // language=PostgreSQL
+  // language=SQL
   public static final String SELECT_BY_TXN_ID =
       "SELECT \"CQ_ID\", \"CQ_DATABASE\", \"CQ_TABLE\", \"CQ_PARTITION\", "
           + "\"CQ_STATE\", \"CQ_TYPE\", \"CQ_TBLPROPERTIES\", \"CQ_WORKER_ID\", \"CQ_START\", \"CQ_RUN_AS\", "
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetDatabaseIdHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetDatabaseIdHandler.java
new file mode 100644
index 000000000000..d6c06017fa9b
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetDatabaseIdHandler.java
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.MetaWrapperException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+public class GetDatabaseIdHandler implements QueryHandler<Long> {
+
+  private final String database;
+  private final String catalog;
+
+  public GetDatabaseIdHandler(String database, String catalog) {
+    this.database = database;
+    this.catalog = catalog;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return "SELECT \"DB_ID\" FROM \"DBS\" WHERE \"NAME\" = :database AND \"CTLG_NAME\" = :catalog";
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource()
+        .addValue("database", database)
+        .addValue("catalog", catalog);
+  }
+
+  @Override
+  public Long extractData(ResultSet rs) throws SQLException {
+    if (!rs.next()) {
+      throw new MetaWrapperException(new MetaException("DB with name " + database + " does not exist in catalog " + catalog));
+    }
+    return rs.getLong(1);
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetHighWaterMarkHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetHighWaterMarkHandler.java
new file mode 100644
index 000000000000..c6cec1504d96
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetHighWaterMarkHandler.java
@@ -0,0 +1,55 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.dao.EmptyResultDataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+public class GetHighWaterMarkHandler implements QueryHandler<Long> {
+  
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return "SELECT MAX(\"TXN_ID\") FROM \"TXNS\"";
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource();
+  }
+
+  @Override
+  public Long extractData(ResultSet rs) throws SQLException, DataAccessException {
+    if (rs.next()) {
+      long max = rs.getLong(1);
+      if (rs.wasNull()) {
+        throw new EmptyResultDataAccessException("Transaction tables not properly initialized, null record found in MAX(TXN_ID)", 1);
+      } else {
+        return max;
+      }
+    }
+    throw new EmptyResultDataAccessException("Transaction tables not properly initialized, null record found in MAX(TXN_ID)", 1);
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetLatestCommittedCompactionInfoHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetLatestCommittedCompactionInfoHandler.java
new file mode 100644
index 000000000000..b22fb0f0a894
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetLatestCommittedCompactionInfoHandler.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.commons.collections.CollectionUtils;
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.CompactionInfoStruct;
+import org.apache.hadoop.hive.metastore.api.GetLatestCommittedCompactionInfoRequest;
+import org.apache.hadoop.hive.metastore.api.GetLatestCommittedCompactionInfoResponse;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionState;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.Set;
+
+public class GetLatestCommittedCompactionInfoHandler implements QueryHandler<GetLatestCommittedCompactionInfoResponse> {
+  
+  private final GetLatestCommittedCompactionInfoRequest rqst;
+
+  public GetLatestCommittedCompactionInfoHandler(GetLatestCommittedCompactionInfoRequest rqst) {
+    this.rqst = rqst;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return 
+        "SELECT * FROM ( " +
+          "SELECT \"CC_ID\", \"CC_DATABASE\", \"CC_TABLE\", \"CC_PARTITION\", \"CC_TYPE\" FROM \"COMPLETED_COMPACTIONS\" " +
+          "   WHERE \"CC_STATE\" = :succeeded " + 
+          "UNION ALL " +
+          "SELECT \"CQ_ID\" AS \"CC_ID\", \"CQ_DATABASE\" AS \"CC_DATABASE\", \"CQ_TABLE\" AS \"CC_TABLE\", " +
+          "\"CQ_PARTITION\" AS \"CC_PARTITION\", \"CQ_TYPE\" AS \"CC_TYPE\" FROM \"COMPACTION_QUEUE\" " +
+          "   WHERE \"CQ_STATE\" = :readyForCleaning) AS compactions " +
+        "WHERE " +
+            "\"CC_DATABASE\" = :dbName AND \"CC_TABLE\" = :tableName AND " +
+            "(\"CC_PARTITION\" IN (:partitionNames) OR :emptyPartitionNames = TRUE) AND " +
+            "(\"CC_ID\" > :id OR :id IS NULL) " +
+        "ORDER BY \"CC_ID\" DESC";
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource()
+        .addValue("succeeded", CompactionState.SUCCEEDED.getSqlConst(), Types.CHAR)
+        .addValue("readyForCleaning", CompactionState.READY_FOR_CLEANING.getSqlConst(), Types.CHAR)
+        .addValue("dbName", rqst.getDbname())
+        .addValue("tableName", rqst.getTablename())
+        .addValue("emptyPartitionNames", CollectionUtils.isEmpty(rqst.getPartitionnames()), Types.BOOLEAN)
+        .addValue("partitionNames", CollectionUtils.isEmpty(rqst.getPartitionnames()) ? null : rqst.getPartitionnames(), Types.VARCHAR)
+        .addValue("id", rqst.isSetLastCompactionId() ? rqst.getLastCompactionId() : null, Types.BIGINT);
+  }
+
+  @Override
+  public GetLatestCommittedCompactionInfoResponse extractData(ResultSet rs) throws SQLException, DataAccessException {
+    GetLatestCommittedCompactionInfoResponse response = new GetLatestCommittedCompactionInfoResponse(new ArrayList<>());
+    Set<String> partitionSet = new HashSet<>();
+    while (rs.next()) {
+      CompactionInfoStruct lci = new CompactionInfoStruct();
+      lci.setId(rs.getLong(1));
+      lci.setDbname(rs.getString(2));
+      lci.setTablename(rs.getString(3));
+      String partition = rs.getString(4);
+      if (!rs.wasNull()) {
+        lci.setPartitionname(partition);
+      }
+      lci.setType(TxnUtils.dbCompactionType2ThriftType(rs.getString(5).charAt(0)));
+      // Only put the latest record of each partition into response
+      if (!partitionSet.contains(partition)) {
+        response.addToCompactions(lci);
+        partitionSet.add(partition);
+      }
+    }
+    return response;
+  }
+
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetLocksByLockId.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetLocksByLockId.java
new file mode 100644
index 000000000000..9981d19d2708
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetLocksByLockId.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
+import org.apache.hadoop.hive.metastore.txn.MetaWrapperException;
+import org.apache.hadoop.hive.metastore.txn.entities.LockInfo;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * NEVER call this function without first calling heartbeat(long, long)
+ */
+public class GetLocksByLockId implements QueryHandler<List<LockInfo>> {
+
+  private static final String noSelectQuery = " \"HL_LOCK_EXT_ID\", \"HL_LOCK_INT_ID\", " +
+      "\"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", \"HL_LOCK_STATE\", \"HL_LOCK_TYPE\", \"HL_TXNID\" " +
+      "FROM \"HIVE_LOCKS\" WHERE \"HL_LOCK_EXT_ID\" = :extLockId";
+  
+  private final long extLockId;
+  private final int limit;
+  private final SQLGenerator sqlGenerator;
+
+  public GetLocksByLockId(long extLockId, int limit, SQLGenerator sqlGenerator) {
+    this.extLockId = extLockId;
+    this.limit = limit;
+    this.sqlGenerator = sqlGenerator;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    if (limit > 0) {
+      return sqlGenerator.addLimitClause(limit, noSelectQuery);
+    } else {
+      return "SELECT " + noSelectQuery;      
+    }
+ }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource().addValue("extLockId", extLockId);
+  }
+
+  @Override
+  public List<LockInfo> extractData(ResultSet rs) throws SQLException, DataAccessException {
+    List<LockInfo> result = new ArrayList<>();
+    while (rs.next()) {
+      try {
+        result.add(new LockInfo(rs));
+      } catch (MetaException e) {
+        throw new MetaWrapperException(e);
+      }
+    }
+    return result;
+  }
+  
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetMaxAllocatedTableWriteIdHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetMaxAllocatedTableWriteIdHandler.java
new file mode 100644
index 000000000000..5fcfd8d974a5
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetMaxAllocatedTableWriteIdHandler.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MaxAllocatedTableWriteIdRequest;
+import org.apache.hadoop.hive.metastore.api.MaxAllocatedTableWriteIdResponse;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+public class GetMaxAllocatedTableWriteIdHandler implements QueryHandler<MaxAllocatedTableWriteIdResponse> {
+
+  private final MaxAllocatedTableWriteIdRequest rqst;
+
+  public GetMaxAllocatedTableWriteIdHandler(MaxAllocatedTableWriteIdRequest rqst) {
+    this.rqst = rqst;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return "SELECT \"NWI_NEXT\" FROM \"NEXT_WRITE_ID\" WHERE \"NWI_DATABASE\" = :dbName AND \"NWI_TABLE\" = :tableName";
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource()
+        .addValue("dbName", rqst.getDbName())
+        .addValue("tableName", rqst.getTableName());
+  }
+
+  @Override
+  public MaxAllocatedTableWriteIdResponse extractData(ResultSet rs) throws SQLException, DataAccessException {
+    long maxWriteId = 0L;
+    if (rs.next()) {
+      // The row stores the next id to allocate, not the previously allocated one, hence the -1.
+      maxWriteId = rs.getLong(1) - 1;
+    }
+    return new MaxAllocatedTableWriteIdResponse(maxWriteId);
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetOpenTxnTypeAndLockHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetOpenTxnTypeAndLockHandler.java
new file mode 100644
index 000000000000..889f3cbe1751
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetOpenTxnTypeAndLockHandler.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.TxnType;
+import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.Optional;
+
+public class GetOpenTxnTypeAndLockHandler implements QueryHandler<TxnType> {
+  
+  private final SQLGenerator sqlGenerator;
+  private final long txnId;
+
+  public GetOpenTxnTypeAndLockHandler(SQLGenerator sqlGenerator, long txnId) {
+    this.sqlGenerator = sqlGenerator;
+    this.txnId = txnId;
+  }
+
+  /**
+   * Note that, by definition, SELECT ... FOR UPDATE is divorced from the subsequent UPDATE: you
+   * executeQuery() to read and then executeUpdate(). One alternative would be to update the row in
+   * TXNS to the same value as before, thus forcing the db to acquire a write lock for the duration
+   * of the transaction. SELECT ... FOR UPDATE locks the row until the transaction commits or rolls
+   * back; a second connection issuing SELECT ... FOR UPDATE on the same row blocks until the lock is released.
+   * @return the parameterized query string selecting the open transaction's type, with a FOR UPDATE clause where the db supports it
+   */
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return sqlGenerator.addForUpdateClause(
+        "SELECT \"TXN_TYPE\" FROM \"TXNS\" WHERE \"TXN_ID\" = :id AND \"TXN_STATE\" = :state");
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource()
+        .addValue("id", txnId)
+        .addValue("state", TxnStatus.OPEN.getSqlConst(), Types.CHAR);
+  }
+
+  @Override
+  public TxnType extractData(ResultSet rs) throws SQLException, DataAccessException {
+    return rs.next() ? TxnType.findByValue(rs.getInt(1)) : null;
+  }
+
+}
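
The javadoc above only pays off when the query and the dependent update share a single database transaction, so that the FOR UPDATE row lock on TXNS is still held when the update runs. A sketch of that pattern, assuming Spring's TransactionTemplate and a NamedParameterJdbcTemplate bound to the same DataSource; the wrapper class and its wiring are assumptions, not the metastore's actual code path.

    import org.apache.hadoop.hive.metastore.DatabaseProduct;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.TxnType;
    import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
    import org.springframework.jdbc.core.namedparam.NamedParameterJdbcTemplate;
    import org.springframework.transaction.PlatformTransactionManager;
    import org.springframework.transaction.support.TransactionTemplate;

    // Hypothetical wrapper; assumed to live next to the handlers in ...txn.jdbc.queries.
    class OpenTxnTypeLockSketch {
      TxnType readTypeWithRowLock(PlatformTransactionManager txnManager, NamedParameterJdbcTemplate jdbc,
          SQLGenerator sqlGenerator, DatabaseProduct dbProduct, long txnId) {
        return new TransactionTemplate(txnManager).execute(status -> {
          GetOpenTxnTypeAndLockHandler handler = new GetOpenTxnTypeAndLockHandler(sqlGenerator, txnId);
          try {
            TxnType type = jdbc.query(handler.getParameterizedQueryString(dbProduct),
                handler.getQueryParameters(), handler::extractData);
            // ... issue the dependent UPDATE here, while the SELECT ... FOR UPDATE lock is still held ...
            return type;
          } catch (MetaException e) {
            throw new IllegalStateException(e); // TransactionCallback cannot throw checked exceptions
          }
        });
      }
    }
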
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetOpenTxnsListHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetOpenTxnsListHandler.java
new file mode 100644
index 000000000000..464af14328d7
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetOpenTxnsListHandler.java
@@ -0,0 +1,126 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.TxnType;
+import org.apache.hadoop.hive.metastore.txn.entities.OpenTxn;
+import org.apache.hadoop.hive.metastore.txn.entities.OpenTxnList;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+
+public class GetOpenTxnsListHandler implements QueryHandler<OpenTxnList> {
+
+  private static final Logger LOG = LoggerFactory.getLogger(GetOpenTxnsListHandler.class);
+
+  //language=SQL
+  private static final String OPEN_TXNS_QUERY = "SELECT \"TXN_ID\", \"TXN_STATE\", \"TXN_TYPE\", "
+      + "(%s - \"TXN_STARTED\") FROM \"TXNS\" ORDER BY \"TXN_ID\"";
+  //language=SQL
+  private static final String OPEN_TXNS_INFO_QUERY = "SELECT \"TXN_ID\", \"TXN_STATE\", \"TXN_TYPE\", "
+      + "(%s - \"TXN_STARTED\"), \"TXN_USER\", \"TXN_HOST\", \"TXN_STARTED\", \"TXN_LAST_HEARTBEAT\" "
+      + "FROM \"TXNS\" ORDER BY \"TXN_ID\"";
+  
+  private final boolean infoFields;
+  private final long openTxnTimeOutMillis;
+
+  public GetOpenTxnsListHandler(boolean infoFields, long openTxnTimeOutMillis) {
+    this.infoFields = infoFields;
+    this.openTxnTimeOutMillis = openTxnTimeOutMillis;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return String.format(infoFields ? OPEN_TXNS_INFO_QUERY : OPEN_TXNS_QUERY, TxnUtils.getEpochFn(databaseProduct));  
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return null;
+  }
+
+  // We need to figure out the HighWaterMark and the list of open transactions.
+  /*
+   * This method needs guarantees from
+   * {@link #openTxns(OpenTxnRequest)} and {@link #commitTxn(CommitTxnRequest)}.
+   * It looks at the TXNS table and finds each transaction between the max(txn_id), used as HighWaterMark,
+   * and the max(txn_id) before the TXN_OPENTXN_TIMEOUT period, used as LowWaterMark.
+   * Every transaction that is not found between these is considered open, since it may still appear later.
+   * openTxns must ensure that no new transaction is opened with a txn_id below the LWM, and
+   * commitTxn must ensure that no committed transaction is removed before the time period expires.
+   */
+  @Override
+  public OpenTxnList extractData(ResultSet rs) throws SQLException, DataAccessException {
+    /*
+     * We can use the maximum txn_id from the TXNS table as the high water mark, since commitTxn and the Initiator
+     * guarantee that the transaction with the highest txn_id is never removed from the TXNS table.
+     * A pending openTxns call that has already acquired its sequenceId but has not yet committed its insert
+     * into the TXNS table will either have a txn_id lower than the HWM and be listed in the open txn list,
+     * or have a higher txn_id and not affect this getOpenTxns() call.
+     */
+    long hwm = 0;
+    long openTxnLowBoundary = 0;
+    List<OpenTxn> txnInfos = new ArrayList<>();
+
+    while (rs.next()) {
+      long txnId = rs.getLong(1);
+      long age = rs.getLong(4);
+      hwm = txnId;
+      if (age < openTxnTimeOutMillis) {
+        // We will consider every gap as an open transaction from the previous txnId
+        openTxnLowBoundary++;
+        while (txnId > openTxnLowBoundary) {
+          // Add an empty open transaction for every missing value
+          txnInfos.add(new OpenTxn(openTxnLowBoundary, TxnStatus.OPEN, TxnType.DEFAULT));
+          LOG.debug("Open transaction added for missing value in TXNS {}",
+              JavaUtils.txnIdToString(openTxnLowBoundary));
+          openTxnLowBoundary++;
+        }
+      } else {
+        openTxnLowBoundary = txnId;
+      }
+      TxnStatus state = TxnStatus.fromString(rs.getString(2));
+      if (state == TxnStatus.COMMITTED) {
+        // This is only here, to avoid adding this txnId as possible gap
+        continue;
+      }
+      OpenTxn txnInfo = new OpenTxn(txnId, state, TxnType.findByValue(rs.getInt(3)));
+      if (infoFields) {
+        txnInfo.setUser(rs.getString(5));
+        txnInfo.setHost(rs.getString(6));
+        txnInfo.setStartedTime(rs.getLong(7));
+        txnInfo.setLastHeartBeatTime(rs.getLong(8));
+      }
+      txnInfos.add(txnInfo);
+    }
+    LOG.debug("Got OpenTxnList with hwm: {} and openTxnList size {}.", hwm, txnInfos.size());
+    return new OpenTxnList(hwm, txnInfos);
+  }
+}
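
To make the gap-filling logic in extractData easier to review, here is a short trace with illustrative numbers only. Suppose TXNS contains txn ids 7, 9 and 10: id 8 is missing because its openTxns insert has not landed yet, id 7 is older than TXN_OPENTXN_TIMEOUT, ids 9 and 10 are younger, and 10 is COMMITTED. Row 7 only moves the low boundary to 7 and is added as open; row 9 first advances the boundary to 8, emits a placeholder OpenTxn(8, OPEN, DEFAULT) for the gap, and is then added itself; row 10 raises the high water mark to 10 but is skipped because it is COMMITTED. The result is hwm = 10 with an open list of {7, 8, 9}, matching the low/high water mark contract described in the comments above.
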
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetTxnDbsUpdatedHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetTxnDbsUpdatedHandler.java
new file mode 100644
index 000000000000..4600064afc3c
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetTxnDbsUpdatedHandler.java
@@ -0,0 +1,62 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Returns the databases updated by txnId.
+ * Queries TXN_TO_WRITE_ID using txnId.
+ */
+public class GetTxnDbsUpdatedHandler implements QueryHandler<List<String>> {
+  
+  private final long txnId;
+
+  public GetTxnDbsUpdatedHandler(long txnId) {
+    this.txnId = txnId;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return "SELECT DISTINCT \"T2W_DATABASE\" FROM \"TXN_TO_WRITE_ID\" \"COMMITTED\" WHERE \"T2W_TXNID\" = :txnId";
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource().addValue("txnId", txnId);
+  }
+
+  @Override
+  public List<String> extractData(ResultSet rs) throws SQLException, DataAccessException {
+    List<String> dbsUpdated = new ArrayList<>();
+    while (rs.next()) {
+      dbsUpdated.add(rs.getString(1));
+    }
+    return dbsUpdated;
+  }
+}
\ No newline at end of file
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetWriteIdsHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetWriteIdsHandler.java
new file mode 100644
index 000000000000..8ae2023a89a3
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/GetWriteIdsHandler.java
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.LockRequest;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class GetWriteIdsHandler implements QueryHandler<Map<Pair<String, String>, Long>> {
+
+
+  //language=SQL
+  private static final String SELECT_WRITE_ID_QUERY = "SELECT \"T2W_DATABASE\", \"T2W_TABLE\", \"T2W_WRITEID\" " +
+      "FROM \"TXN_TO_WRITE_ID\" WHERE \"T2W_TXNID\" = :txnId ";
+
+  private final LockRequest lockRequest;
+
+  public GetWriteIdsHandler(LockRequest lockRequest) {
+    this.lockRequest = lockRequest;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    StringBuilder sb = new StringBuilder(SELECT_WRITE_ID_QUERY);
+    sb.append(" AND (");
+    for (int i = 0; i < lockRequest.getComponentSize(); i++) {
+      sb.append("(\"T2W_DATABASE\" = ").append(":db").append(i)
+          .append(" AND \"T2W_TABLE\" = :table").append(i).append(")");
+      if (i < lockRequest.getComponentSize() - 1) {
+        sb.append(" OR ");
+      }
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    MapSqlParameterSource params = new MapSqlParameterSource()
+        .addValue("txnId", lockRequest.getTxnid());
+    for (int i = 0; i < lockRequest.getComponentSize(); i++) {
+      params.addValue("db" + i, lockRequest.getComponent().get(i).getDbname());
+      params.addValue("table" + i, lockRequest.getComponent().get(i).getTablename());
+    }
+    return params;
+  }
+
+  @Override
+  public Map<Pair<String, String>, Long> extractData(ResultSet rs) throws SQLException, DataAccessException {
+    Map<Pair<String, String>, Long> writeIds = new HashMap<>();
+    while (rs.next()) {
+      writeIds.put(Pair.of(rs.getString("T2W_DATABASE"), rs.getString("T2W_TABLE")), rs.getLong("T2W_WRITEID"));
+    }
+    return writeIds;
+  }
+}
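
Because the WHERE clause above is assembled per request, the rendered SQL is easiest to review on a concrete case. For a LockRequest with two components, getParameterizedQueryString() produces (whitespace tidied):

    SELECT "T2W_DATABASE", "T2W_TABLE", "T2W_WRITEID" FROM "TXN_TO_WRITE_ID"
    WHERE "T2W_TXNID" = :txnId
      AND (("T2W_DATABASE" = :db0 AND "T2W_TABLE" = :table0)
        OR ("T2W_DATABASE" = :db1 AND "T2W_TABLE" = :table1))

and getQueryParameters() binds txnId plus db0/table0 and db1/table1 from the corresponding components.
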
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/LatestTxnIdInConflictHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/LatestTxnIdInConflictHandler.java
new file mode 100644
index 000000000000..f63748ca9662
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/LatestTxnIdInConflictHandler.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import com.google.common.collect.ImmutableList;
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.entities.OperationType;
+import org.apache.hadoop.hive.metastore.txn.TxnHandler;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.List;
+
+public class LatestTxnIdInConflictHandler implements QueryHandler<Long> {
+
+  private static final List OPERATION_TYPES = ImmutableList.of(
+    OperationType.UPDATE.getSqlConst(),
+    OperationType.DELETE.getSqlConst()
+  );
+  
+  private final long txnId;
+
+  public LatestTxnIdInConflictHandler(long txnId) {
+    this.txnId = txnId;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return 
+        " SELECT MAX(\"COMMITTED\".\"WS_TXNID\")" +
+        " FROM \"WRITE_SET\" \"COMMITTED\"" +
+        " INNER JOIN (" +
+        "   SELECT DISTINCT \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\", \"TC_TXNID\"" +
+        "   FROM \"TXN_COMPONENTS\"" +
+        "   WHERE \"TC_TXNID\" = :txnId" +
+        "     AND \"TC_OPERATION_TYPE\" IN (:types)" +
+        " ) \"CUR\"" +
+        " ON \"COMMITTED\".\"WS_DATABASE\" = \"CUR\".\"TC_DATABASE\"" +
+        "   AND \"COMMITTED\".\"WS_TABLE\" = \"CUR\".\"TC_TABLE\"" +
+        (TxnHandler.ConfVars.useMinHistoryLevel() ? "" :
+        "   AND \"COMMITTED\".\"WS_OPERATION_TYPE\" != :wsType") + 
+        // For partitioned tables we always track writes at partition level (never at table level),
+        // and for non-partitioned tables always at table level, so the same table should never
+        // have entries both with and without a partition key
+        "   AND (\"COMMITTED\".\"WS_PARTITION\" = \"CUR\".\"TC_PARTITION\" OR" +
+        "     \"CUR\".\"TC_PARTITION\" IS NULL) " +
+        // txns overlap
+        " WHERE \"CUR\".\"TC_TXNID\" <= \"COMMITTED\".\"WS_COMMIT_ID\"";
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource()
+        .addValue("txnId", txnId)
+        .addValue("types", OPERATION_TYPES, Types.CHAR)
+        .addValue("wsType", OperationType.INSERT.getSqlConst(), Types.CHAR);        
+  }
+
+  @Override
+  public Long extractData(ResultSet rs) throws SQLException, DataAccessException {
+    return rs.next() ? rs.getLong(1) : -1;
+  }
+  
+}
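
The conditional concatenation above yields two query variants that are easy to miss when reading the string: when TxnHandler.ConfVars.useMinHistoryLevel() is true, the join condition only covers database, table and the partition match, while when it is false the extra filter AND "COMMITTED"."WS_OPERATION_TYPE" != :wsType is appended. In the first case the wsType value bound in getQueryParameters() is simply never referenced by the statement, which NamedParameterJdbcTemplate tolerates.
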
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/MetricsInfoHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/MetricsInfoHandler.java
new file mode 100644
index 000000000000..d967e222f0fc
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/MetricsInfoHandler.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.TxnType;
+import org.apache.hadoop.hive.metastore.txn.entities.MetricsInfo;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.text.MessageFormat;
+
+import static org.apache.hadoop.hive.metastore.txn.TxnStore.READY_FOR_CLEANING;
+
+public class MetricsInfoHandler implements QueryHandler<MetricsInfo> {
+  
+  public static final MetricsInfoHandler INSTANCE = new MetricsInfoHandler();
+  
+  //language=SQL
+  private static final String SELECT_METRICS_INFO_QUERY =
+      "SELECT * FROM (SELECT COUNT(*) FROM \"TXN_TO_WRITE_ID\") \"TTWID\" CROSS JOIN (" +
+          "SELECT COUNT(*) FROM \"COMPLETED_TXN_COMPONENTS\") \"CTC\" CROSS JOIN (" +
+          "SELECT COUNT(*), MIN(\"TXN_ID\"), ({0} - MIN(\"TXN_STARTED\"))/1000 FROM \"TXNS\" " +
+          "   WHERE \"TXN_STATE\"= :openStatus AND \"TXN_TYPE\" = :replCreatedType) \"TR\" CROSS JOIN (" +
+          "SELECT COUNT(*), MIN(\"TXN_ID\"), ({0} - MIN(\"TXN_STARTED\"))/1000 FROM \"TXNS\" " +
+          "   WHERE \"TXN_STATE\"= :openStatus AND \"TXN_TYPE\" != :replCreatedType) \"T\" CROSS JOIN (" +
+          "SELECT COUNT(*), MIN(\"TXN_ID\"), ({0} - MIN(\"TXN_STARTED\"))/1000 FROM \"TXNS\" " +
+          "   WHERE \"TXN_STATE\"= :abortedStatus) \"A\" CROSS JOIN (" +
+          "SELECT COUNT(*), ({0} - MIN(\"HL_ACQUIRED_AT\"))/1000 FROM \"HIVE_LOCKS\") \"HL\" CROSS JOIN (" +
+          "SELECT ({0} - MIN(\"CQ_COMMIT_TIME\"))/1000 from \"COMPACTION_QUEUE\" " +
+          "   WHERE \"CQ_STATE\"= :readyForCleaningState) OLDEST_CLEAN";
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return MessageFormat.format(SELECT_METRICS_INFO_QUERY, TxnUtils.getEpochFn(databaseProduct));
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource()
+        .addValue("openStatus", TxnStatus.OPEN.getSqlConst(), Types.CHAR)
+        .addValue("abortedStatus", TxnStatus.ABORTED.getSqlConst(), Types.CHAR)
+        .addValue("replCreatedType", TxnType.REPL_CREATED.getValue())
+        .addValue("readyForCleaningState", Character.toString(READY_FOR_CLEANING), Types.CHAR);
+  }
+
+  @Override
+  public MetricsInfo extractData(ResultSet rs) throws SQLException, DataAccessException {
+    MetricsInfo metrics = new MetricsInfo();
+    if (rs.next()) {
+      metrics.setTxnToWriteIdCount(rs.getInt(1));
+      metrics.setCompletedTxnsCount(rs.getInt(2));
+      metrics.setOpenReplTxnsCount(rs.getInt(3));
+      metrics.setOldestOpenReplTxnId(rs.getInt(4));
+      metrics.setOldestOpenReplTxnAge(rs.getInt(5));
+      metrics.setOpenNonReplTxnsCount(rs.getInt(6));
+      metrics.setOldestOpenNonReplTxnId(rs.getInt(7));
+      metrics.setOldestOpenNonReplTxnAge(rs.getInt(8));
+      metrics.setAbortedTxnsCount(rs.getInt(9));
+      metrics.setOldestAbortedTxnId(rs.getInt(10));
+      metrics.setOldestAbortedTxnAge(rs.getInt(11));
+      metrics.setLocksCount(rs.getInt(12));
+      metrics.setOldestLockAge(rs.getInt(13));
+      metrics.setOldestReadyForCleaningAge(rs.getInt(14));
+    }
+    return metrics;
+  }
+  
+  
+}
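The {0} placeholders above are filled in by MessageFormat with the database-specific epoch expression before Spring binds the named parameters. A self-contained illustration follows; the Postgres-style expression is only an example stand-in for what TxnUtils.getEpochFn() might return.

import java.text.MessageFormat;

public class EpochFnSubstitutionExample {
  public static void main(String[] args) {
    // Example stand-in for TxnUtils.getEpochFn(databaseProduct); the real expression differs per database.
    String epochFn = "(EXTRACT(EPOCH FROM CURRENT_TIMESTAMP) * 1000)";
    String template = "SELECT COUNT(*), ({0} - MIN(\"TXN_STARTED\"))/1000 FROM \"TXNS\" WHERE \"TXN_STATE\" = :openStatus";
    // Every {0} becomes the epoch expression; :openStatus is left for Spring to bind later.
    System.out.println(MessageFormat.format(template, epochFn));
  }
}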
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/MinUncommittedTxnIdHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/MinUncommittedTxnIdHandler.java
similarity index 96%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/MinUncommittedTxnIdHandler.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/MinUncommittedTxnIdHandler.java
index ba527fb4d39b..327963a5a8d5 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/MinUncommittedTxnIdHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/MinUncommittedTxnIdHandler.java
@@ -15,11 +15,11 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
 
 import org.apache.hadoop.hive.metastore.DatabaseProduct;
 import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.txn.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
 import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
 import org.springframework.dao.DataAccessException;
 import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/OpenTxnTimeoutLowBoundaryTxnIdHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/OpenTxnTimeoutLowBoundaryTxnIdHandler.java
new file mode 100644
index 000000000000..6fb65b75716f
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/OpenTxnTimeoutLowBoundaryTxnIdHandler.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+import static org.apache.hadoop.hive.metastore.txn.TxnUtils.getEpochFn;
+
+public class OpenTxnTimeoutLowBoundaryTxnIdHandler implements QueryHandler<Long> {
+  
+  private final long openTxnTimeOutMillis;
+
+  public OpenTxnTimeoutLowBoundaryTxnIdHandler(long openTxnTimeOutMillis) {
+    this.openTxnTimeOutMillis = openTxnTimeOutMillis;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return "SELECT MAX(\"TXN_ID\") FROM \"TXNS\" WHERE \"TXN_STARTED\" < (" + getEpochFn(databaseProduct) + " - "
+        + openTxnTimeOutMillis + ")";
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource();
+  }
+
+  @Override
+  public Long extractData(ResultSet rs) throws SQLException, DataAccessException {
+    rs.next();
+    long result = rs.getLong(1);
+    if (rs.wasNull()) {
+      /*
+       * TXNS always contains at least one transaction,
+       * the row where txnid = (select max(txnid) where txn_started < epoch - TXN_OPENTXN_TIMEOUT) is never deleted
+       */
+      throw new SQLException("Transaction tables not properly " + "initialized, null record found in MAX(TXN_ID)");
+    }
+    return result;
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/ReadyToCleanAbortHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ReadyToCleanAbortHandler.java
similarity index 97%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/ReadyToCleanAbortHandler.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ReadyToCleanAbortHandler.java
index 4965bdd7baf3..4940d384095b 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/ReadyToCleanAbortHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ReadyToCleanAbortHandler.java
@@ -15,15 +15,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.DatabaseProduct;
 import org.apache.hadoop.hive.metastore.api.CompactionType;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
-import org.apache.hadoop.hive.metastore.txn.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
 import org.apache.hadoop.hive.metastore.txn.TxnStore;
 import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
 import org.springframework.dao.DataAccessException;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/ReadyToCleanHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ReadyToCleanHandler.java
similarity index 92%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/ReadyToCleanHandler.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ReadyToCleanHandler.java
index 66a1481c2366..0f22b00e1976 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/impl/ReadyToCleanHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ReadyToCleanHandler.java
@@ -15,13 +15,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.impl;
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.DatabaseProduct;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.txn.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionInfo;
+import org.apache.hadoop.hive.metastore.txn.TxnHandler;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
 import org.springframework.dao.DataAccessException;
@@ -38,13 +39,11 @@
 
 public class ReadyToCleanHandler implements QueryHandler<List<CompactionInfo>> {
   
-  private final boolean useMinHistoryWriteId;
   private final long minOpenTxnWaterMark;
   private final long retentionTime;
   private final int fetchSize;
 
-  public ReadyToCleanHandler(Configuration conf, boolean useMinHistoryWriteId, long minOpenTxnWaterMark, long retentionTime) {
-    this.useMinHistoryWriteId = useMinHistoryWriteId;
+  public ReadyToCleanHandler(Configuration conf, long minOpenTxnWaterMark, long retentionTime) {
     this.minOpenTxnWaterMark = minOpenTxnWaterMark;
     this.retentionTime = retentionTime;
     this.fetchSize = MetastoreConf.getIntVar(conf, MetastoreConf.ConfVars.COMPACTOR_FETCH_SIZE);
@@ -64,7 +63,7 @@ public String getParameterizedQueryString(DatabaseProduct databaseProduct) throw
         " \"CQ_ID\", \"cq1\".\"CQ_DATABASE\", \"cq1\".\"CQ_TABLE\", \"cq1\".\"CQ_PARTITION\"," +
             "  \"CQ_TYPE\", \"CQ_RUN_AS\", \"CQ_HIGHEST_WRITE_ID\", \"CQ_TBLPROPERTIES\", \"CQ_RETRY_RETENTION\", " +
             "  \"CQ_NEXT_TXN_ID\"";
-    if (useMinHistoryWriteId) {
+    if (TxnHandler.ConfVars.useMinHistoryWriteId()) {
       queryStr += ", \"MIN_OPEN_WRITE_ID\"";
     }
     queryStr +=
@@ -80,7 +79,7 @@ public String getParameterizedQueryString(DatabaseProduct databaseProduct) throw
             "    OR \"cq1\".\"CQ_PARTITION\" IS NULL AND \"cq2\".\"CQ_PARTITION\" IS NULL)" +
             "  AND \"CQ_HIGHEST_WRITE_ID\" = \"MIN_WRITE_ID_HWM\" ";
 
-    if (useMinHistoryWriteId) {
+    if (TxnHandler.ConfVars.useMinHistoryWriteId()) {
       queryStr +=
           "LEFT JOIN (" +
               "  SELECT MIN(\"MH_WRITEID\") \"MIN_OPEN_WRITE_ID\", \"MH_DATABASE\", \"MH_TABLE\"" +
@@ -120,7 +119,7 @@ public List extractData(ResultSet rs) throws SQLException, DataA
       info.properties = rs.getString(8);
       info.retryRetention = rs.getInt(9);
       info.nextTxnId = rs.getLong(10);
-      if (useMinHistoryWriteId) {
+      if (TxnHandler.ConfVars.useMinHistoryWriteId()) {
         info.minOpenWriteId = rs.getLong(11);
       }
       infos.add(info);
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ShowCompactHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ShowCompactHandler.java
new file mode 100644
index 000000000000..d8d2c8da473b
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ShowCompactHandler.java
@@ -0,0 +1,185 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
+import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
+import org.apache.hadoop.hive.metastore.txn.entities.CompactionState;
+import org.apache.hadoop.hive.metastore.txn.MetaWrapperException;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.ArrayList;
+
+import static org.apache.commons.lang3.StringUtils.isBlank;
+import static org.apache.commons.lang3.StringUtils.isNotBlank;
+
+public class ShowCompactHandler implements QueryHandler<ShowCompactResponse> {
+  
+  private static final String DEFAULT_POOL_NAME = "default";
+
+  //language=SQL
+  private static final String SHOW_COMPACTION_QUERY =
+      " XX.* FROM ( SELECT " +
+          "  \"CQ_DATABASE\" AS \"CC_DATABASE\", \"CQ_TABLE\" AS \"CC_TABLE\", \"CQ_PARTITION\" AS \"CC_PARTITION\", " +
+          "  \"CQ_STATE\" AS \"CC_STATE\", \"CQ_TYPE\" AS \"CC_TYPE\", \"CQ_WORKER_ID\" AS \"CC_WORKER_ID\", " +
+          "  \"CQ_START\" AS \"CC_START\", -1 \"CC_END\", \"CQ_RUN_AS\" AS \"CC_RUN_AS\", " +
+          "  \"CQ_HADOOP_JOB_ID\" AS \"CC_HADOOP_JOB_ID\", \"CQ_ID\" AS \"CC_ID\", \"CQ_ERROR_MESSAGE\" AS \"CC_ERROR_MESSAGE\", " +
+          "  \"CQ_ENQUEUE_TIME\" AS \"CC_ENQUEUE_TIME\", \"CQ_WORKER_VERSION\" AS \"CC_WORKER_VERSION\", " +
+          "  \"CQ_INITIATOR_ID\" AS \"CC_INITIATOR_ID\", \"CQ_INITIATOR_VERSION\" AS \"CC_INITIATOR_VERSION\", " +
+          "  \"CQ_CLEANER_START\" AS \"CC_CLEANER_START\", \"CQ_POOL_NAME\" AS \"CC_POOL_NAME\", \"CQ_TXN_ID\" AS \"CC_TXN_ID\", " +
+          "  \"CQ_NEXT_TXN_ID\" AS \"CC_NEXT_TXN_ID\", \"CQ_COMMIT_TIME\" AS \"CC_COMMIT_TIME\", " +
+          "  \"CQ_HIGHEST_WRITE_ID\" AS \"CC_HIGHEST_WRITE_ID\" " +
+          "FROM " +
+          "  \"COMPACTION_QUEUE\" " +
+          "UNION ALL " +
+          "SELECT " +
+          "  \"CC_DATABASE\" , \"CC_TABLE\", \"CC_PARTITION\", \"CC_STATE\", \"CC_TYPE\", \"CC_WORKER_ID\", " +
+          "  \"CC_START\", \"CC_END\", \"CC_RUN_AS\", \"CC_HADOOP_JOB_ID\", \"CC_ID\", \"CC_ERROR_MESSAGE\", " +
+          "  \"CC_ENQUEUE_TIME\", \"CC_WORKER_VERSION\", \"CC_INITIATOR_ID\", \"CC_INITIATOR_VERSION\", " +
+          "   -1 , \"CC_POOL_NAME\", \"CC_TXN_ID\", \"CC_NEXT_TXN_ID\", \"CC_COMMIT_TIME\", " +
+          "  \"CC_HIGHEST_WRITE_ID\"" +
+          "FROM " +
+          "  \"COMPLETED_COMPACTIONS\" ) XX " +
+          "WHERE " +
+          "  (\"CC_ID\" = :id OR :id IS NULL) AND " +
+          "  (\"CC_DATABASE\" = :dbName OR :dbName IS NULL) AND " +
+          "  (\"CC_TABLE\" = :tableName OR :tableName IS NULL) AND " +
+          "  (\"CC_PARTITION\" = :partition OR :partition IS NULL) AND " +
+          "  (\"CC_STATE\" = :state OR :state IS NULL) AND " +
+          "  (\"CC_TYPE\" = :type OR :type IS NULL) AND " +
+          "  (\"CC_POOL_NAME\" = :poolName OR :poolName IS NULL)";
+
+  //language=SQL
+  private static final String SHOW_COMPACTION_ORDERBY_CLAUSE =
+      " ORDER BY CASE " +
+          "   WHEN \"CC_END\" > \"CC_START\" and \"CC_END\" > \"CC_COMMIT_TIME\" " +
+          "     THEN \"CC_END\" " +
+          "   WHEN \"CC_START\" > \"CC_COMMIT_TIME\" " +
+          "     THEN \"CC_START\" " +
+          "   ELSE \"CC_COMMIT_TIME\" " +
+          " END desc ," +
+          " \"CC_ENQUEUE_TIME\" asc";
+
+
+  private final ShowCompactRequest request;
+  private final SQLGenerator sqlGenerator;
+
+
+  public ShowCompactHandler(ShowCompactRequest request, SQLGenerator sqlGenerator) {
+    this.request = request;
+    this.sqlGenerator = sqlGenerator;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    String noSelectQuery = SHOW_COMPACTION_QUERY + getShowCompactSortingOrderClause(request);
+    int rowLimit = (int) request.getLimit();
+    if (rowLimit > 0) {
+      return sqlGenerator.addLimitClause(rowLimit, noSelectQuery);
+    }
+    return "SELECT " + noSelectQuery;
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    Long id = request.getId() > 0 ? request.getId() : null;
+    try {
+      return new MapSqlParameterSource()
+          .addValue("id", id, Types.BIGINT)
+          .addValue("dbName", request.getDbName(), Types.VARCHAR)
+          .addValue("tableName", request.getTbName(), Types.VARCHAR)
+          .addValue("partition", request.getPartName(), Types.VARCHAR)
+          .addValue("state", request.getState(), Types.CHAR)
+          .addValue("type", request.getType() == null ? null : Character.toString(TxnUtils.thriftCompactionType2DbType(request.getType())), Types.CHAR)
+          .addValue("poolName", request.getPoolName(), Types.VARCHAR);
+    } catch (MetaException e) {
+      throw new MetaWrapperException(e);
+    }
+  }
+
+  @Override
+  public ShowCompactResponse extractData(ResultSet rs) throws SQLException, DataAccessException {
+    ShowCompactResponse response = new ShowCompactResponse(new ArrayList<>());
+    while (rs.next()) {
+      ShowCompactResponseElement e = new ShowCompactResponseElement();
+      e.setDbname(rs.getString(1));
+      e.setTablename(rs.getString(2));
+      e.setPartitionname(rs.getString(3));
+      e.setState(CompactionState.fromSqlConst(rs.getString(4)).toString());
+      try {
+        e.setType(TxnUtils.dbCompactionType2ThriftType(rs.getString(5).charAt(0)));
+      } catch (SQLException ex) {
+        //do nothing to handle RU/D if we add another status
+      }
+      e.setWorkerid(rs.getString(6));
+      long start = rs.getLong(7);
+      if (!rs.wasNull()) {
+        e.setStart(start);
+      }
+      long endTime = rs.getLong(8);
+      if (endTime != -1) {
+        e.setEndTime(endTime);
+      }
+      e.setRunAs(rs.getString(9));
+      e.setHadoopJobId(rs.getString(10));
+      e.setId(rs.getLong(11));
+      e.setErrorMessage(rs.getString(12));
+      long enqueueTime = rs.getLong(13);
+      if (!rs.wasNull()) {
+        e.setEnqueueTime(enqueueTime);
+      }
+      e.setWorkerVersion(rs.getString(14));
+      e.setInitiatorId(rs.getString(15));
+      e.setInitiatorVersion(rs.getString(16));
+      long cleanerStart = rs.getLong(17);
+      if (!rs.wasNull() && (cleanerStart != -1)) {
+        e.setCleanerStart(cleanerStart);
+      }
+      String poolName = rs.getString(18);
+      if (isBlank(poolName)) {
+        e.setPoolName(DEFAULT_POOL_NAME);
+      } else {
+        e.setPoolName(poolName);
+      }
+      e.setTxnId(rs.getLong(19));
+      e.setNextTxnId(rs.getLong(20));
+      e.setCommitTime(rs.getLong(21));
+      e.setHightestTxnId(rs.getLong(22));
+      response.addToCompacts(e);      
+    }
+    return response;
+  }
+
+  private String getShowCompactSortingOrderClause(ShowCompactRequest request) {
+    String sortingOrder = request.getOrder();
+    return isNotBlank(sortingOrder) ? "  ORDER BY  " + sortingOrder : SHOW_COMPACTION_ORDERBY_CLAUSE;
+  }
+  
+}
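The (column = :param OR :param IS NULL) pattern above makes every filter optional, so an almost empty ShowCompactRequest simply returns more rows. A hedged usage sketch, assuming the usual Thrift-generated setter for the dbName field read by the handler:

import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
import org.apache.hadoop.hive.metastore.txn.jdbc.queries.ShowCompactHandler;

public class ShowCompactRequestExample {
  public static ShowCompactHandler forDatabase(String dbName, SQLGenerator sqlGenerator) {
    ShowCompactRequest request = new ShowCompactRequest();
    // Only the database filter is set; id, table, partition, state, type and pool name stay unset,
    // so their predicates collapse to ":param IS NULL" and keep every row from both queues.
    request.setDbName(dbName);
    return new ShowCompactHandler(request, sqlGenerator);
  }
}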
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ShowLocksHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ShowLocksHandler.java
new file mode 100644
index 000000000000..eefacad35252
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/ShowLocksHandler.java
@@ -0,0 +1,179 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.LockTypeComparator;
+import org.apache.hadoop.hive.metastore.api.LockState;
+import org.apache.hadoop.hive.metastore.api.LockType;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
+import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;
+import org.apache.hadoop.hive.metastore.txn.entities.LockInfo;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.apache.hadoop.hive.metastore.utils.LockTypeUtil;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.io.Serializable;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Comparator;
+import java.util.List;
+
+import static org.apache.hadoop.hive.metastore.txn.entities.LockInfo.LOCK_ACQUIRED;
+import static org.apache.hadoop.hive.metastore.txn.entities.LockInfo.LOCK_WAITING;
+
+public class ShowLocksHandler implements QueryHandler<ShowLocksResponse> {
+
+  private final ShowLocksRequest request;
+
+  public ShowLocksHandler(ShowLocksRequest request) {
+    this.request = request;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return 
+        "SELECT \"HL_LOCK_EXT_ID\", \"HL_TXNID\", \"HL_DB\", \"HL_TABLE\", \"HL_PARTITION\", \"HL_LOCK_STATE\", " +
+        "\"HL_LOCK_TYPE\", \"HL_LAST_HEARTBEAT\", \"HL_ACQUIRED_AT\", \"HL_USER\", \"HL_HOST\", \"HL_LOCK_INT_ID\"," +
+        "\"HL_BLOCKEDBY_EXT_ID\", \"HL_BLOCKEDBY_INT_ID\", \"HL_AGENT_INFO\" FROM \"HIVE_LOCKS\"" +
+        "WHERE " +
+            "(\"HL_DB\" = :dbName OR :dbName IS NULL) AND " +
+            "(\"HL_TABLE\" = :tableName OR :tableName IS NULL) AND " +
+            "(\"HL_PARTITION\" = :partition OR :partition IS NULL) AND " +
+            "(\"HL_TXNID\" = :txnId OR :txnId IS NULL)";
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource()
+        .addValue("dbName", request.getDbname(), Types.VARCHAR)
+        .addValue("tableName", request.getTablename(), Types.VARCHAR)
+        .addValue("partition", request.getPartname(), Types.VARCHAR)
+        .addValue("txnId", request.isSetTxnid() ? request.getTxnid() : null, Types.BIGINT);
+  }
+
+  @Override
+  public ShowLocksResponse extractData(ResultSet rs) throws SQLException, DataAccessException {
+    ShowLocksResponse rsp = new ShowLocksResponse();
+    List<ShowLocksResponseElement> elems = new ArrayList<>();
+    List<LockInfoExt> sortedList = new ArrayList<>();
+    while (rs.next()) {
+      ShowLocksResponseElement e = new ShowLocksResponseElement();
+      e.setLockid(rs.getLong(1));
+      long txnid = rs.getLong(2);
+      if (!rs.wasNull()) e.setTxnid(txnid);
+      e.setDbname(rs.getString(3));
+      e.setTablename(rs.getString(4));
+      String partition = rs.getString(5);
+      if (partition != null) e.setPartname(partition);
+      switch (rs.getString(6).charAt(0)) {
+        case LOCK_ACQUIRED:
+          e.setState(LockState.ACQUIRED);
+          break;
+        case LOCK_WAITING:
+          e.setState(LockState.WAITING);
+          break;
+        default:
+          throw new SQLException("Unknown lock state " + rs.getString(6).charAt(0));
+      }
+
+      char lockChar = rs.getString(7).charAt(0);
+      LockType lockType = LockTypeUtil.getLockTypeFromEncoding(lockChar)
+          .orElseThrow(() -> new SQLException("Unknown lock type: " + lockChar));
+      e.setType(lockType);
+
+      e.setLastheartbeat(rs.getLong(8));
+      long acquiredAt = rs.getLong(9);
+      if (!rs.wasNull()) e.setAcquiredat(acquiredAt);
+      e.setUser(rs.getString(10));
+      e.setHostname(rs.getString(11));
+      e.setLockIdInternal(rs.getLong(12));
+      long id = rs.getLong(13);
+      if (!rs.wasNull()) {
+        e.setBlockedByExtId(id);
+      }
+      id = rs.getLong(14);
+      if (!rs.wasNull()) {
+        e.setBlockedByIntId(id);
+      }
+      e.setAgentInfo(rs.getString(15));
+      sortedList.add(new LockInfoExt(e));
+    }
+    //this ensures that "SHOW LOCKS" prints the locks in the same order as they are examined
+    //by checkLock() - makes diagnostics easier.
+    Collections.sort(sortedList, new LockInfoComparator());
+    for(LockInfoExt lockInfoExt : sortedList) {
+      elems.add(lockInfoExt.e);
+    }
+    rsp.setLocks(elems);
+    return rsp;    
+  }
+
+  /**
+   * used to sort entries in {@link org.apache.hadoop.hive.metastore.api.ShowLocksResponse}
+   */
+  private static class LockInfoExt extends LockInfo {
+    private final ShowLocksResponseElement e;
+    LockInfoExt(ShowLocksResponseElement e) {
+      super(e);
+      this.e = e;
+    }
+  }
+
+  private static class LockInfoComparator implements Comparator<LockInfo>, Serializable {
+    private final LockTypeComparator lockTypeComparator = new LockTypeComparator();
+
+    public int compare(LockInfo info1, LockInfo info2) {
+      // We sort by state (acquired vs waiting) and then by LockType, then by id
+      if (info1.getState() == LockState.ACQUIRED &&
+          info2.getState() != LockState.ACQUIRED) {
+        return -1;
+      }
+      if (info1.getState() != LockState.ACQUIRED &&
+          info2.getState() == LockState.ACQUIRED) {
+        return 1;
+      }
+
+      int sortByType = lockTypeComparator.compare(info1.getType(), info2.getType());
+      if(sortByType != 0) {
+        return sortByType;
+      }
+      if (info1.getExtLockId() < info2.getExtLockId()) {
+        return -1;
+      } else if (info1.getExtLockId() > info2.getExtLockId()) {
+        return 1;
+      } else {
+        if (info1.getIntLockId() < info2.getIntLockId()) {
+          return -1;
+        } else if (info1.getIntLockId() > info2.getIntLockId()) {
+          return 1;
+        } else {
+          return 0;
+        }
+      }
+    }
+  }  
+  
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TablesWithAbortedTxnsHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TablesWithAbortedTxnsHandler.java
new file mode 100644
index 000000000000..16b768dc34bb
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TablesWithAbortedTxnsHandler.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.entities.TxnStatus;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.Set;
+import java.util.TreeSet;
+
+public class TablesWithAbortedTxnsHandler implements QueryHandler<Set<String>> {
+
+  //language=SQL
+  private static final String SELECT_TABLES_WITH_X_ABORTED_TXNS =
+      "SELECT \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\" FROM \"TXN_COMPONENTS\" " +
+          "INNER JOIN \"TXNS\" ON \"TC_TXNID\" = \"TXN_ID\" WHERE \"TXN_STATE\" = :abortedState " +
+          "GROUP BY \"TC_DATABASE\", \"TC_TABLE\", \"TC_PARTITION\" HAVING COUNT(\"TXN_ID\") > :txnThreshold";
+  
+  private final int txnThreshold;
+
+  public TablesWithAbortedTxnsHandler(int txnThreshold) {
+    this.txnThreshold = txnThreshold;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return SELECT_TABLES_WITH_X_ABORTED_TXNS;
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource()
+        .addValue("abortedState", TxnStatus.ABORTED.getSqlConst(), Types.CHAR)
+        .addValue("txnThreshold", txnThreshold);
+  }
+
+  @Override
+  public Set<String> extractData(ResultSet rs) throws SQLException, DataAccessException {
+    Set<String> resourceNames = new TreeSet<>();
+    while (rs.next()) {
+      String resourceName = rs.getString(1) + "." + rs.getString(2);
+      String partName = rs.getString(3);
+      resourceName = partName != null ? resourceName + "#" + partName : resourceName;
+      resourceNames.add(resourceName);
+    }
+    return resourceNames;
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TargetTxnIdListHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TargetTxnIdListHandler.java
new file mode 100644
index 000000000000..70d5ed7ecfbf
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TargetTxnIdListHandler.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.ArrayList;
+import java.util.List;
+
+public class TargetTxnIdListHandler implements QueryHandler<List<Long>> {
+  
+  private final String replPolicy;
+  private final List<Long> sourceTxnsIds;
+
+  public TargetTxnIdListHandler(String replPolicy, List<Long> sourceTxnsIds) {
+    this.replPolicy = replPolicy;
+    this.sourceTxnsIds = sourceTxnsIds;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return "SELECT \"RTM_TARGET_TXN_ID\" FROM \"REPL_TXN_MAP\" " +
+        "WHERE \"RTM_SRC_TXN_ID\" IN (:txnIds) AND \"RTM_REPL_POLICY\" = :policy";
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource()
+        .addValue("txnIds", sourceTxnsIds, Types.BIGINT)
+        .addValue("policy", replPolicy);
+  }
+
+  @Override
+  public List<Long> extractData(ResultSet rs) throws SQLException, DataAccessException {
+    List<Long> targetTxnIdList = new ArrayList<>();
+    while (rs.next()) {
+      targetTxnIdList.add(rs.getLong(1));
+    }
+    return targetTxnIdList;
+  }
+}
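The IN (:txnIds) clause above relies on Spring's named-parameter handling: when a List is bound to :txnIds, NamedParameterJdbcTemplate expands it into one placeholder per element at execution time. A small sketch with example values:

import java.util.Arrays;
import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;

public class InClauseBindingExample {
  public static MapSqlParameterSource exampleParameters() {
    // "IN (:txnIds)" effectively becomes "IN (?, ?, ?)" for a three-element list.
    return new MapSqlParameterSource()
        .addValue("txnIds", Arrays.asList(101L, 102L, 103L))
        .addValue("policy", "example_repl_policy");
  }
}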
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TxnIdForWriteIdHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TxnIdForWriteIdHandler.java
new file mode 100644
index 000000000000..9fdb3465d724
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/jdbc/queries/TxnIdForWriteIdHandler.java
@@ -0,0 +1,64 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.jdbc.queries;
+
+import org.apache.hadoop.hive.metastore.DatabaseProduct;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.txn.jdbc.QueryHandler;
+import org.springframework.dao.DataAccessException;
+import org.springframework.jdbc.core.namedparam.MapSqlParameterSource;
+import org.springframework.jdbc.core.namedparam.SqlParameterSource;
+
+import java.sql.ResultSet;
+import java.sql.SQLException;
+
+public class TxnIdForWriteIdHandler implements QueryHandler<Long> {
+  
+  private final long writeId;
+  private final String dbName;
+  private final String tableName;
+
+  public TxnIdForWriteIdHandler(long writeId, String dbName, String tableName) {
+    this.writeId = writeId;
+    this.dbName = dbName;
+    this.tableName = tableName;
+  }
+
+  @Override
+  public String getParameterizedQueryString(DatabaseProduct databaseProduct) throws MetaException {
+    return "SELECT \"T2W_TXNID\" FROM \"TXN_TO_WRITE_ID\" WHERE"
+        + " \"T2W_DATABASE\" = ? AND \"T2W_TABLE\" = ? AND \"T2W_WRITEID\" = " + writeId;
+  }
+
+  @Override
+  public SqlParameterSource getQueryParameters() {
+    return new MapSqlParameterSource()
+        .addValue("writeId", writeId)
+        .addValue("dbName", dbName)
+        .addValue("tableName", tableName);
+  }
+
+  @Override
+  public Long extractData(ResultSet rs) throws SQLException, DataAccessException {
+    long txnId = -1;
+    if (rs.next()) {
+      txnId = rs.getLong(1);
+    }
+    return txnId;
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retryhandling/RetryPropagation.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/RetryPropagation.java
similarity index 97%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retryhandling/RetryPropagation.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/RetryPropagation.java
index 9171985b2121..90721440d645 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retryhandling/RetryPropagation.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/RetryPropagation.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.retryhandling;
+package org.apache.hadoop.hive.metastore.txn.retry;
 
 /**
  * Specifies how the Retry context propagation is done in various situations.
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retryhandling/SqlRetry.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/SqlRetry.java
similarity index 96%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retryhandling/SqlRetry.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/SqlRetry.java
index 6c4b5493cc82..e1427a3d250e 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retryhandling/SqlRetry.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/SqlRetry.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.retryhandling;
+package org.apache.hadoop.hive.metastore.txn.retry;
 
 import java.lang.annotation.ElementType;
 import java.lang.annotation.Retention;
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retryhandling/SqlRetryCallProperties.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/SqlRetryCallProperties.java
similarity index 98%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retryhandling/SqlRetryCallProperties.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/SqlRetryCallProperties.java
index b3614211c9c6..213040a60997 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retryhandling/SqlRetryCallProperties.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/SqlRetryCallProperties.java
@@ -15,7 +15,7 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.retryhandling;
+package org.apache.hadoop.hive.metastore.txn.retry;
 
 /**
  * Contains all the properties which can alter the behavior of a retry-call in 
diff --git a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/NamedForkJoinWorkerThreadFactory.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/SqlRetryException.java
similarity index 54%
rename from upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/NamedForkJoinWorkerThreadFactory.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/SqlRetryException.java
index 2b95f7be961d..ae4634bd2d11 100644
--- a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/NamedForkJoinWorkerThreadFactory.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/SqlRetryException.java
@@ -15,26 +15,22 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.upgrade.acid;
+package org.apache.hadoop.hive.metastore.txn.retry;
 
-import java.util.concurrent.ForkJoinPool;
-import java.util.concurrent.ForkJoinWorkerThread;
+import org.springframework.dao.DataAccessException;
+
+import java.sql.SQLException;
+
+import static org.apache.hadoop.hive.metastore.txn.retry.SqlRetryHandler.MANUAL_RETRY;
 
 /**
- * This class allows specifying a prefix for ForkJoinPool thread names.
+ * This exception can be used to trigger a manual retry in {@link SqlRetryHandler}. Since it extends
+ * {@link RuntimeException}, it does not need to be declared in the throws clause of the methods using it.
  */
-public class NamedForkJoinWorkerThreadFactory implements ForkJoinPool.ForkJoinWorkerThreadFactory {
+public class SqlRetryException extends DataAccessException {
 
-  NamedForkJoinWorkerThreadFactory(String namePrefix) {
-    this.namePrefix = namePrefix;
+  public SqlRetryException(String message) {
+    super(message, new SQLException(message, MANUAL_RETRY));
   }
 
-  private final String namePrefix;
-
-  @Override
-  public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
-    ForkJoinWorkerThread worker = ForkJoinPool.defaultForkJoinWorkerThreadFactory.newThread(pool);
-    worker.setName(namePrefix + worker.getName());
-    return worker;
-  }
 }
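A hedged sketch of the intended use: throwing SqlRetryException from inside a retried block asks SqlRetryHandler to re-run the block instead of failing immediately. The handler and properties arguments and the lookup() helper below are placeholders for whatever the caller already has.

import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryCallProperties;
import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryException;
import org.apache.hadoop.hive.metastore.txn.retry.SqlRetryHandler;
import org.apache.thrift.TException;

public class ManualRetryExample {

  public Long fetchWithRetry(SqlRetryHandler handler, SqlRetryCallProperties properties) throws TException {
    return handler.executeWithRetry(properties, () -> {
      Long value = lookup(); // hypothetical data-access call
      if (value == null) {
        // Signals SqlRetryHandler to re-run this block (up to its configured retry limit).
        throw new SqlRetryException("row not visible yet, retrying");
      }
      return value;
    });
  }

  private Long lookup() {
    return null; // placeholder
  }
}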
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retryhandling/SqlRetryFunction.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/SqlRetryFunction.java
similarity index 89%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retryhandling/SqlRetryFunction.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/SqlRetryFunction.java
index 59a25bf39841..8c7445e0ad37 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retryhandling/SqlRetryFunction.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/SqlRetryFunction.java
@@ -15,9 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.retryhandling;
+package org.apache.hadoop.hive.metastore.txn.retry;
 
 import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.thrift.TException;
 import org.springframework.dao.DataAccessException;
 
 import java.sql.SQLException;
@@ -31,6 +32,6 @@
 @FunctionalInterface
 public interface SqlRetryFunction<T> {
   
-  T execute() throws SQLException, DataAccessException, MetaException;
+  T execute() throws SQLException, DataAccessException, TException;
   
 }
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retryhandling/SqlRetryHandler.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/SqlRetryHandler.java
similarity index 85%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retryhandling/SqlRetryHandler.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/SqlRetryHandler.java
index 780a3186e01a..727ba0b1bf80 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retryhandling/SqlRetryHandler.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/retry/SqlRetryHandler.java
@@ -15,15 +15,16 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn.retryhandling;
+package org.apache.hadoop.hive.metastore.txn.retry;
 
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.DatabaseProduct;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
-import org.apache.hadoop.hive.metastore.txn.ContextNode;
 import org.apache.hadoop.hive.metastore.txn.MetaWrapperException;
+import org.apache.hadoop.hive.metastore.utils.StackThreadLocal;
+import org.apache.thrift.TException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.springframework.dao.DataAccessException;
@@ -32,7 +33,6 @@
 import java.sql.SQLException;
 import java.util.Objects;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.locks.ReentrantLock;
 import java.util.regex.Pattern;
 
 /**
@@ -43,14 +43,9 @@ public class SqlRetryHandler {
   private static final Logger LOG = LoggerFactory.getLogger(SqlRetryHandler.class);
 
   private static final int ALLOWED_REPEATED_DEADLOCKS = 10;
-  private static final String MANUAL_RETRY = "ManualRetry";
+  static final String MANUAL_RETRY = "ManualRetry";
 
-  private final ThreadLocal<ContextNode<Object>> threadLocal = new ThreadLocal<>();
-
-  /**
-   * Derby specific concurrency control
-   */
-  private static final ReentrantLock derbyLock = new ReentrantLock(true);
+  private final StackThreadLocal<Object> threadLocal = new StackThreadLocal<>();
 
   private final DatabaseProduct databaseProduct;
   private final long deadlockRetryInterval;
@@ -89,11 +84,11 @@ public SqlRetryHandler(Configuration conf, DatabaseProduct databaseProduct){
   * @param <Result> Type of the result
    * @throws MetaException Thrown in case of execution error.
    */
-  public <Result> Result executeWithRetry(SqlRetryCallProperties properties, SqlRetryFunction<Result> function) throws MetaException {
+  public <Result> Result executeWithRetry(SqlRetryCallProperties properties, SqlRetryFunction<Result> function) throws TException {
     Objects.requireNonNull(function, "RetryFunction cannot be null!");
     Objects.requireNonNull(properties, "RetryCallProperties cannot be null!");
 
-    if (threadLocal.get() != null && properties.getRetryPropagation().canJoinContext()) {
+    if (threadLocal.isSet() && properties.getRetryPropagation().canJoinContext()) {
       /*
         If there is a context in the ThreadLocal and we are allowed to join it, we can skip establishing a nested retry-call.
       */
@@ -121,34 +116,31 @@ public  Result executeWithRetry(SqlRetryCallProperties properties, SqlRe
     
     try {
       if (properties.isLockInternally()) {
-        lockInternal();
+        databaseProduct.lockInternal();
       }
-      threadLocal.set(new ContextNode<>(threadLocal.get(), new Object()));
+      threadLocal.set(new Object());
       return executeWithRetryInternal(properties, function);
     } finally {
-      ContextNode<Object> node = threadLocal.get();
-      if (node != null && node.getParent() != null) {
-        threadLocal.set(node.getParent());
-      } else {
-        threadLocal.remove();
-      }
+      threadLocal.unset();
       if (properties.isLockInternally()) {
-        unlockInternal();
+        databaseProduct.unlockInternal();
       }
     }
   }
 
   private <Result> Result executeWithRetryInternal(SqlRetryCallProperties properties, SqlRetryFunction<Result> function) 
-      throws MetaException {
+      throws TException {
     LOG.debug("Running retry function:" + properties);
 
     try {
       return function.execute();
-    } catch (DataAccessException e) {
+    } catch (DataAccessException | SQLException e) {
       SQLException sqlEx = null;
       if (e.getCause() instanceof SQLException) {
         sqlEx = (SQLException) e.getCause();
-      }
+      } else if (e instanceof SQLException) {
+        sqlEx = (SQLException) e;
+      }      
       if (sqlEx != null) {
         if (checkDeadlock(sqlEx, properties)) {
           properties.setDeadlockCount(properties.getDeadlockCount() - 1); 
@@ -165,9 +157,6 @@ private  Result executeWithRetryInternal(SqlRetryCallProperties properti
       //unwrap and re-throw
       LOG.error("Execution failed for caller {}", properties, e.getCause());
       throw (MetaException) e.getCause();
-    } catch (Exception e) {
-      LOG.error("Execution failed for caller {}", properties, e);
-      throw new MetaException("Failed to execute function: " + properties.getCaller() + ", details:" + e.getMessage());
     }
   }
   
@@ -181,6 +170,7 @@ private boolean checkDeadlock(SQLException e, SqlRetryCallProperties properties)
         try {
           Thread.sleep(waitInterval);
         } catch (InterruptedException ie) {
+          Thread.currentThread().interrupt();
           // NOP
         }
         return true;
@@ -231,7 +221,7 @@ private boolean waitForRetry(String caller, String errMsg, int retryCount) {
       try {
         Thread.sleep(retryInterval);
       } catch (InterruptedException ex) {
-        //
+        Thread.currentThread().interrupt();
       }
       return true;
     } else {
@@ -273,21 +263,4 @@ static boolean isRetryable(Configuration conf, Exception ex) {
     }
     return false;
   }
-
-  /**
-   * lockInternal() and {@link #unlockInternal()} are used to serialize those operations that require
-   * Select ... For Update to sequence operations properly.  In practice that means when running
-   * with Derby database.  See more notes at class level.
-   */
-  private void lockInternal() {
-    if(databaseProduct.isDERBY()) {
-      derbyLock.lock();
-    }
-  }
-  private void unlockInternal() {
-    if(databaseProduct.isDERBY()) {
-      derbyLock.unlock();
-    }
-  }
-
 }
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidHouseKeeperService.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/service/AcidHouseKeeperService.java
similarity index 65%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidHouseKeeperService.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/service/AcidHouseKeeperService.java
index 0a16006c49eb..86799e90621d 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidHouseKeeperService.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/service/AcidHouseKeeperService.java
@@ -15,19 +15,24 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn;
+package org.apache.hadoop.hive.metastore.txn.service;
+
+import com.google.common.collect.ImmutableMap;
 
-import org.apache.commons.lang3.Functions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
-import static org.apache.commons.lang3.Functions.FailableRunnable;
+import org.apache.commons.lang3.function.FailableRunnable;
+import org.apache.commons.lang3.function.Failable;
 
 /**
  * Performs background tasks for Transaction management in Hive.
@@ -38,16 +43,27 @@ public class AcidHouseKeeperService implements MetastoreTaskThread {
   private static final Logger LOG = LoggerFactory.getLogger(AcidHouseKeeperService.class);
 
   private Configuration conf;
-  private boolean isCompactorEnabled;
-  private TxnStore txnHandler;
+  protected TxnStore txnHandler;
+  protected String serviceName;
+  protected Map<FailableRunnable<MetaException>, String> tasks;
+
+  public AcidHouseKeeperService() {
+    serviceName = this.getClass().getSimpleName();
+  }
 
   @Override
   public void setConf(Configuration configuration) {
     conf = configuration;
-    isCompactorEnabled =
-        MetastoreConf.getBoolVar(conf, MetastoreConf.ConfVars.COMPACTOR_INITIATOR_ON) || MetastoreConf.getBoolVar(conf,
-            MetastoreConf.ConfVars.COMPACTOR_CLEANER_ON);
     txnHandler = TxnUtils.getTxnStore(conf);
+    initTasks();
+  }
+
+  protected void initTasks() {
+    tasks = ImmutableMap.<FailableRunnable<MetaException>, String>builder()
+            .put(txnHandler::performTimeOuts, "Cleaning timed out txns and locks")
+            .put(txnHandler::performWriteSetGC, "Cleaning obsolete write set entries")
+            .put(txnHandler::cleanTxnToWriteIdTable, "Cleaning obsolete TXN_TO_WRITE_ID entries")
+            .build();
   }
 
   @Override
@@ -65,12 +81,12 @@ public void run() {
     TxnStore.MutexAPI.LockHandle handle = null;
     try {
       handle = txnHandler.getMutexAPI().acquireLock(TxnStore.MUTEX_KEY.HouseKeeper.name());
-      LOG.info("Starting to run AcidHouseKeeperService.");
+      LOG.info("Starting to run {}", serviceName);
       long start = System.currentTimeMillis();
       cleanTheHouse();
-      LOG.debug("Total time AcidHouseKeeperService took: {} seconds.", elapsedSince(start));
-    } catch (Throwable t) {
-      LOG.error("Unexpected error in thread: {}, message: {}", Thread.currentThread().getName(), t.getMessage(), t);
+      LOG.debug("Total time {} took: {} seconds.", serviceName, elapsedSince(start));
+    } catch (Exception e) {
+      LOG.error("Unexpected exception in thread: {}, message: {}", Thread.currentThread().getName(), e.getMessage(), e);
     } finally {
       if (handle != null) {
         handle.releaseLocks();
@@ -79,18 +95,12 @@ public void run() {
   }
 
   private void cleanTheHouse() {
-    performTask(txnHandler::performTimeOuts, "Cleaning timed out txns and locks");
-    performTask(txnHandler::performWriteSetGC, "Cleaning obsolete write set entries");
-    performTask(txnHandler::cleanTxnToWriteIdTable, "Cleaning obsolete TXN_TO_WRITE_ID entries");
-    if (isCompactorEnabled) {
-      performTask(txnHandler::removeDuplicateCompletedTxnComponents, "Cleaning duplicate COMPLETED_TXN_COMPONENTS entries");
-      performTask(txnHandler::purgeCompactionHistory, "Cleaning obsolete compaction history entries");
-    }
+    tasks.forEach(this::performTask);
   }
 
   private void performTask(FailableRunnable<MetaException> task, String description) {
     long start = System.currentTimeMillis();
-    Functions.run(task);
+    Failable.run(task);
     LOG.debug("{} took {} seconds.", description, elapsedSince(start));
   }
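The refactor above replaces the hard-coded performTask calls with a map from FailableRunnable to a human-readable description, which a subclass (such as the CompactionHouseKeeperService added later in this patch) can override via initTasks(). A minimal standalone sketch of the same idea using the commons-lang3 Failable utilities; the task bodies and class name here are placeholders, not the real TxnStore calls:

```java
import com.google.common.collect.ImmutableMap;
import org.apache.commons.lang3.function.Failable;
import org.apache.commons.lang3.function.FailableRunnable;

import java.util.Map;

// Sketch of the "map of failable tasks" pattern: each entry pairs a task that
// may throw a checked exception with the description used for logging.
public class HouseKeepingSketch {

  private final Map<FailableRunnable<Exception>, String> tasks =
      ImmutableMap.<FailableRunnable<Exception>, String>builder()
          .put(this::timeOutStaleTxns, "Cleaning timed out txns and locks")
          .put(this::purgeWriteSets, "Cleaning obsolete write set entries")
          .build();

  public void cleanTheHouse() {
    // Failable.run rethrows checked exceptions as unchecked ones,
    // so the loop body stays free of try/catch noise.
    tasks.forEach((task, description) -> {
      long start = System.currentTimeMillis();
      Failable.run(task);
      System.out.printf("%s took %d ms%n", description, System.currentTimeMillis() - start);
    });
  }

  private void timeOutStaleTxns() throws Exception { /* placeholder for txnHandler.performTimeOuts() */ }

  private void purgeWriteSets() throws Exception { /* placeholder for txnHandler.performWriteSetGC() */ }
}
```

Overriding subclasses only need to rebuild the map; the timing and logging in cleanTheHouse()/performTask stay shared.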
 
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/service/AcidOpenTxnsCounterService.java
similarity index 88%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/service/AcidOpenTxnsCounterService.java
index fe788004398c..89dbff3f96fb 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidOpenTxnsCounterService.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/service/AcidOpenTxnsCounterService.java
@@ -15,10 +15,12 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn;
+package org.apache.hadoop.hive.metastore.txn.service;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -55,8 +57,8 @@ public void run() {
         lastLogTime = now;
       }
     }
-    catch (Throwable t) {
-      LOG.error("Unexpected error in thread: {}, message: {}", Thread.currentThread().getName(), t.getMessage(), t);
+    catch (Exception e) {
+      LOG.error("Unexpected exception in thread: {}, message: {}", Thread.currentThread().getName(), e.getMessage(), e);
     }
   }
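The counter service above (and the housekeeper and cleaner services around it) narrows its catch-all from Throwable to Exception, so JVM-level Errors such as OutOfMemoryError are no longer swallowed by the background loop. A minimal sketch of the distinction, with a made-up task name:

```java
// Sketch: catching Exception keeps a periodic task alive across ordinary
// failures, while letting Errors propagate to the thread's
// uncaught-exception handler instead of being silently logged.
public class BackgroundTaskSketch implements Runnable {

  @Override
  public void run() {
    try {
      doOneRound(); // placeholder for the real housekeeping work
    } catch (Exception e) {
      // Recoverable problem: log and let the scheduler call run() again.
      System.err.printf("Unexpected exception in thread %s: %s%n",
          Thread.currentThread().getName(), e.getMessage());
    }
    // Errors are intentionally not caught here.
  }

  private void doOneRound() throws Exception {
  }
}
```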
 
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidTxnCleanerService.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/service/AcidTxnCleanerService.java
similarity index 88%
rename from standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidTxnCleanerService.java
rename to standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/service/AcidTxnCleanerService.java
index d2800bd008bc..06f284faee08 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/AcidTxnCleanerService.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/service/AcidTxnCleanerService.java
@@ -15,11 +15,13 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.hive.metastore.txn;
+package org.apache.hadoop.hive.metastore.txn.service;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.MetastoreTaskThread;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.txn.TxnStore;
+import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -60,8 +62,8 @@ public void run() {
       long start = System.currentTimeMillis();
       txnHandler.cleanEmptyAbortedAndCommittedTxns();
       LOG.debug("Txn cleaner service took: {} seconds.", elapsedSince(start));
-    } catch (Throwable t) {
-      LOG.error("Unexpected error in thread: {}, message: {}", Thread.currentThread().getName(), t.getMessage(), t);
+    } catch (Exception e) {
+      LOG.error("Unexpected exception in thread: {}, message: {}", Thread.currentThread().getName(), e.getMessage(), e);
     } finally {
       if (handle != null) {
         handle.releaseLocks();
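The cleaner's run() above, like the other housekeeping services, acquires a metastore-level mutex before doing any work and releases it in a finally block. A generic sketch of that acquire/work/release shape, with a hypothetical LockHandle interface standing in for TxnStore.MutexAPI.LockHandle:

```java
// Hypothetical stand-in for TxnStore.MutexAPI.LockHandle: something that must
// be released exactly once after the guarded work is done.
interface LockHandle extends AutoCloseable {
  @Override
  void close();
}

public class MutexGuardedTask {

  private final java.util.function.Supplier<LockHandle> mutex;

  public MutexGuardedTask(java.util.function.Supplier<LockHandle> mutex) {
    this.mutex = mutex;
  }

  public void runOnce(Runnable work) {
    LockHandle handle = null;
    try {
      handle = mutex.get(); // blocks until this node owns the housekeeping mutex
      work.run();           // placeholder for cleanEmptyAbortedAndCommittedTxns()
    } catch (Exception e) {
      System.err.println("Unexpected exception: " + e.getMessage());
    } finally {
      if (handle != null) {
        handle.close();     // mirrors handle.releaseLocks() in the real service
      }
    }
  }
}
```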
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/service/CompactionHouseKeeperService.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/service/CompactionHouseKeeperService.java
new file mode 100644
index 000000000000..6eca48283445
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/txn/service/CompactionHouseKeeperService.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.metastore.txn.service;
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.commons.lang3.function.FailableRunnable;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Performs background tasks for Transaction management in Hive.
+ * Runs inside Hive Metastore Service.
+ */
+public class CompactionHouseKeeperService extends AcidHouseKeeperService {
+
+  public CompactionHouseKeeperService() {
+    serviceName = this.getClass().getSimpleName();
+  }
+
+  @Override
+  protected void initTasks(){
+    tasks = ImmutableMap.<FailableRunnable<MetaException>, String>builder()
+            .put(txnHandler::removeDuplicateCompletedTxnComponents,
+                "Cleaning duplicate COMPLETED_TXN_COMPONENTS entries")
+            .put(txnHandler::purgeCompactionHistory, "Cleaning obsolete compaction history entries")
+            .build();
+  }
+
+  @Override
+  public long runFrequency(TimeUnit unit) {
+    return MetastoreConf.getTimeVar(getConf(), MetastoreConf.ConfVars.COMPACTION_HOUSEKEEPER_SERVICE_INTERVAL,
+        unit);
+  }
+}
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreServerUtils.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreServerUtils.java
index f4afe2aa65d7..3c9e0960bc1e 100644
--- a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreServerUtils.java
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreServerUtils.java
@@ -663,7 +663,8 @@ public static boolean updatePartitionStatsFast(PartitionSpecProxy.PartitionItera
     if (!madeDir) {
       // The partition location already existed and may contain data. Lets try to
       // populate those statistics that don't require a full scan of the data.
-      LOG.info("Updating partition stats fast for: {}", part.getTableName());
+      LOG.info("Updating partition stats fast for: catalog: {} database: {} table: {} partition: {}",
+          part.getCatName(), part.getDbName(), part.getTableName(), part.getCurrent().getValues());
       List<FileStatus> fileStatus = wh.getFileStatusesForLocation(part.getLocation());
       // TODO: this is invalid for ACID tables, and we cannot access AcidUtils here.
       populateQuickStats(fileStatus, params);
diff --git a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/StackThreadLocal.java b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/StackThreadLocal.java
new file mode 100644
index 000000000000..63a4062460ac
--- /dev/null
+++ b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/StackThreadLocal.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.metastore.utils; + +import java.util.ArrayDeque; +import java.util.Deque; + +public class StackThreadLocal { + + private final ThreadLocal> threadLocal = new ThreadLocal<>(); + + public void set(T value) { + Deque stack = threadLocal.get(); + if (stack == null) { + stack = new ArrayDeque<>(); + } + stack.push(value); + threadLocal.set(stack); + } + + public void unset() { + Deque stack = threadLocal.get(); + stack.pop(); + if (stack.isEmpty()) { + threadLocal.remove(); + } + } + + public T get() { + Deque stack = threadLocal.get(); + if (stack != null) { + return stack.peek(); + } else { + throw new IllegalStateException("There is no context to return!"); + } + } + + public boolean isSet() { + return threadLocal.get() != null; + } + +} \ No newline at end of file diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0-beta-2.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql similarity index 99% rename from standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0-beta-2.derby.sql rename to standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql index 9feaa6a0c3ff..4ef57f4960d6 100644 --- a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0-beta-2.derby.sql +++ b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.0.0.derby.sql @@ -288,7 +288,7 @@ CREATE UNIQUE INDEX "APP"."UNIQUE_TYPE" ON "APP"."TYPES" ("TYPE_NAME"); CREATE INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX" ON "APP"."PART_COL_PRIVS" ("AUTHORIZER", "PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); -CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("PART_NAME", "TBL_ID"); +CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("TBL_ID", "PART_NAME"); CREATE UNIQUE INDEX "APP"."UNIQUEFUNCTION" ON "APP"."FUNCS" ("FUNC_NAME", "DB_ID"); @@ -874,4 +874,4 @@ ALTER TABLE "PACKAGES" ADD CONSTRAINT "PACKAGES_FK1" FOREIGN KEY ("DB_ID") REFER -- ----------------------------------------------------------------- -- Record schema version. 
Should be the last step in the init script -- ----------------------------------------------------------------- -INSERT INTO "APP"."VERSION" (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0-beta-2', 'Hive release version 4.0.0-beta-2'); +INSERT INTO "APP"."VERSION" (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0'); diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.1.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.1.0.derby.sql new file mode 100644 index 000000000000..6216a47e9ac1 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/sql/derby/hive-schema-4.1.0.derby.sql @@ -0,0 +1,877 @@ +-- Timestamp: 2011-09-22 15:32:02.024 +-- Source database is: /home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb +-- Connection URL is: jdbc:derby:/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb +-- Specified schema is: APP +-- appendLogs: false + +-- ---------------------------------------------- +-- DDL Statements for functions +-- ---------------------------------------------- + +CREATE FUNCTION "APP"."NUCLEUS_ASCII" (C CHAR(1)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ; + +CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ; + +-- ---------------------------------------------- +-- DDL Statements for tables +-- ---------------------------------------------- +CREATE TABLE "APP"."DBS" ( + "DB_ID" BIGINT NOT NULL, + "DESC" VARCHAR(4000), + "DB_LOCATION_URI" VARCHAR(4000) NOT NULL, + "NAME" VARCHAR(128), + "OWNER_NAME" VARCHAR(128), + "OWNER_TYPE" VARCHAR(10), + "CTLG_NAME" VARCHAR(256) NOT NULL DEFAULT 'hive', + "CREATE_TIME" INTEGER, + "DB_MANAGED_LOCATION_URI" VARCHAR(4000), + "TYPE" VARCHAR(32) DEFAULT 'NATIVE' NOT NULL, + "DATACONNECTOR_NAME" VARCHAR(128), + "REMOTE_DBNAME" VARCHAR(128) +); + +CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT, "AUTHORIZER" VARCHAR(128)); + +CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); + +CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" VARCHAR(128), "TBL_ID" BIGINT, "AUTHORIZER" VARCHAR(128)); + +CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB); + +CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(767) NOT NULL, "TYPE_NAME" CLOB, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL); + +CREATE TABLE "APP"."PARTITION_KEY_VALS" ("PART_ID" 
BIGINT NOT NULL, "PART_KEY_VAL" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."DB_PRIVS" ("DB_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "DB_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128)); + +CREATE TABLE "APP"."DC_PRIVS" ("DC_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "NAME" VARCHAR(128), "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "DC_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128)); + +CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "WRITE_ID" BIGINT DEFAULT 0); + +CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER); + +CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128)); + +CREATE TABLE "APP"."ROLE_MAP" ("ROLE_GRANT_ID" BIGINT NOT NULL, "ADD_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "ROLE_ID" BIGINT); + +CREATE TABLE "APP"."TYPES" ("TYPES_ID" BIGINT NOT NULL, "TYPE_NAME" VARCHAR(128), "TYPE1" VARCHAR(767), "TYPE2" VARCHAR(767)); + +CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "USER_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128)); + +CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB); + +CREATE TABLE "APP"."PARTITION_EVENTS" ( + "PART_NAME_ID" BIGINT NOT NULL, + "CAT_NAME" VARCHAR(256), + "DB_NAME" VARCHAR(128), + "EVENT_TIME" BIGINT NOT NULL, + "EVENT_TYPE" INTEGER NOT NULL, + "PARTITION_NAME" VARCHAR(767), + "TBL_NAME" VARCHAR(256) +); + +CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128)); + +CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N', "WRITE_ID" BIGINT DEFAULT 0); + +CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE 
"APP"."PART_COL_PRIVS" ("PART_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_COL_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128)); + +CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), "IS_COMPRESSED" CHAR(1) NOT NULL, "LOCATION" VARCHAR(4000), "NUM_BUCKETS" INTEGER NOT NULL, "OUTPUT_FORMAT" VARCHAR(4000), "SERDE_ID" BIGINT, "CD_ID" BIGINT, "IS_STOREDASSUBDIRECTORIES" CHAR(1) NOT NULL); + +CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, "NEXT_VAL" BIGINT NOT NULL); + +CREATE TABLE "APP"."TAB_COL_STATS"( + "CAT_NAME" VARCHAR(256) NOT NULL, + "DB_NAME" VARCHAR(128) NOT NULL, + "TABLE_NAME" VARCHAR(256) NOT NULL, + "COLUMN_NAME" VARCHAR(767) NOT NULL, + "COLUMN_TYPE" VARCHAR(128) NOT NULL, + "LONG_LOW_VALUE" BIGINT, + "LONG_HIGH_VALUE" BIGINT, + "DOUBLE_LOW_VALUE" DOUBLE, + "DOUBLE_HIGH_VALUE" DOUBLE, + "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), + "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000), + "NUM_DISTINCTS" BIGINT, + "NUM_NULLS" BIGINT NOT NULL, + "AVG_COL_LEN" DOUBLE, + "MAX_COL_LEN" BIGINT, + "NUM_TRUES" BIGINT, + "NUM_FALSES" BIGINT, + "LAST_ANALYZED" BIGINT, + "CS_ID" BIGINT NOT NULL, + "TBL_ID" BIGINT NOT NULL, + "BIT_VECTOR" BLOB, + "ENGINE" VARCHAR(128) NOT NULL, + "HISTOGRAM" BLOB +); + +CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB); + +CREATE TABLE "APP"."BUCKETING_COLS" ("SD_ID" BIGINT NOT NULL, "BUCKET_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."TYPE_FIELDS" ("TYPE_NAME" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "FIELD_NAME" VARCHAR(128) NOT NULL, "FIELD_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."NUCLEUS_TABLES" ("CLASS_NAME" VARCHAR(128) NOT NULL, "TABLE_NAME" VARCHAR(128) NOT NULL, "TYPE" VARCHAR(4) NOT NULL, "OWNER" VARCHAR(2) NOT NULL, "VERSION" VARCHAR(20) NOT NULL, "INTERFACE_NAME" VARCHAR(256) DEFAULT NULL); + +CREATE TABLE "APP"."SD_PARAMS" ("SD_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB); + +CREATE TABLE "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID" BIGINT NOT NULL); + +CREATE TABLE "APP"."SKEWED_STRING_LIST_VALUES" ("STRING_LIST_ID" BIGINT NOT NULL, "STRING_LIST_VALUE" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."SKEWED_COL_NAMES" ("SD_ID" BIGINT NOT NULL, "SKEWED_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ("SD_ID" BIGINT NOT NULL, "STRING_LIST_ID_KID" BIGINT NOT NULL, "LOCATION" VARCHAR(4000)); + +CREATE TABLE "APP"."SKEWED_VALUES" ("SD_ID_OID" BIGINT NOT NULL, "STRING_LIST_ID_EID" BIGINT NOT NULL, "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT NULL generated always as identity (start with 1), "MASTER_KEY" VARCHAR(767)); + +CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, "TOKEN" VARCHAR(767)); + +CREATE TABLE "APP"."PART_COL_STATS"( + "CAT_NAME" VARCHAR(256) NOT NULL, + "DB_NAME" VARCHAR(128) NOT NULL, + "TABLE_NAME" VARCHAR(256) NOT NULL, + "PARTITION_NAME" VARCHAR(767) NOT NULL, + "COLUMN_NAME" VARCHAR(767) NOT NULL, + "COLUMN_TYPE" VARCHAR(128) NOT NULL, + "LONG_LOW_VALUE" BIGINT, + "LONG_HIGH_VALUE" BIGINT, + "DOUBLE_LOW_VALUE" DOUBLE, + "DOUBLE_HIGH_VALUE" DOUBLE, + 
"BIG_DECIMAL_LOW_VALUE" VARCHAR(4000), + "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000), + "NUM_DISTINCTS" BIGINT, + "BIT_VECTOR" BLOB, + "NUM_NULLS" BIGINT NOT NULL, + "AVG_COL_LEN" DOUBLE, + "MAX_COL_LEN" BIGINT, + "NUM_TRUES" BIGINT, + "NUM_FALSES" BIGINT, + "LAST_ANALYZED" BIGINT, + "CS_ID" BIGINT NOT NULL, + "PART_ID" BIGINT NOT NULL, + "ENGINE" VARCHAR(128) NOT NULL, + "HISTOGRAM" BLOB +); + +CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255)); + +CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000), "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "FUNC_NAME" VARCHAR(128), "FUNC_TYPE" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10)); + +CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL); + +CREATE TABLE "APP"."NOTIFICATION_LOG" ( + "NL_ID" BIGINT NOT NULL, + "CAT_NAME" VARCHAR(256), + "DB_NAME" VARCHAR(128), + "EVENT_ID" BIGINT NOT NULL, + "EVENT_TIME" INTEGER NOT NULL, + "EVENT_TYPE" VARCHAR(32) NOT NULL, + "MESSAGE" CLOB, + "TBL_NAME" VARCHAR(256), + "MESSAGE_FORMAT" VARCHAR(16) +); + +CREATE UNIQUE INDEX "APP"."NOTIFICATION_LOG_EVENT_ID" ON "APP"."NOTIFICATION_LOG" ("EVENT_ID"); + +CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL); + +CREATE TABLE "APP"."KEY_CONSTRAINTS" ( + "CHILD_CD_ID" BIGINT, + "CHILD_INTEGER_IDX" INTEGER, + "CHILD_TBL_ID" BIGINT, + "PARENT_CD_ID" BIGINT, + "PARENT_INTEGER_IDX" INTEGER, + "PARENT_TBL_ID" BIGINT NOT NULL, + "POSITION" BIGINT NOT NULL, + "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, + "CONSTRAINT_TYPE" SMALLINT NOT NULL, + "UPDATE_RULE" SMALLINT, + "DELETE_RULE" SMALLINT, + "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL, + "DEFAULT_VALUE" VARCHAR(400) +); + +CREATE TABLE "APP"."METASTORE_DB_PROPERTIES" ( + "PROPERTY_KEY" VARCHAR(255) NOT NULL, + "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, + "DESCRIPTION" VARCHAR(1000), + "PROPERTYCONTENT" BLOB +); + +CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NS VARCHAR(128), NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL, DEFAULT_POOL_ID BIGINT); + +CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024)); + +CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024), IS_IN_UNMANAGED INTEGER NOT NULL DEFAULT 0); + +CREATE TABLE "APP"."WM_POOL_TO_TRIGGER" (POOL_ID BIGINT NOT NULL, TRIGGER_ID BIGINT NOT NULL); + +CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, ENTITY_TYPE VARCHAR(128) NOT NULL, ENTITY_NAME VARCHAR(128) NOT NULL, POOL_ID BIGINT, ORDERING INTEGER); + +CREATE TABLE "APP"."MV_CREATION_METADATA" ( + "MV_CREATION_METADATA_ID" BIGINT NOT NULL, + "CAT_NAME" VARCHAR(256) NOT NULL, + "DB_NAME" VARCHAR(128) NOT NULL, + "TBL_NAME" VARCHAR(256) NOT NULL, + "TXN_LIST" CLOB, + "MATERIALIZATION_TIME" BIGINT NOT NULL +); + +CREATE TABLE "APP"."MV_TABLES_USED" ( + "MV_CREATION_METADATA_ID" BIGINT NOT NULL, + "TBL_ID" BIGINT NOT NULL, + "INSERTED_COUNT" BIGINT NOT NULL DEFAULT 0, + "UPDATED_COUNT" BIGINT NOT NULL DEFAULT 0, + "DELETED_COUNT" BIGINT NOT NULL DEFAULT 0 +); + +CREATE TABLE "APP"."CTLGS" ( + "CTLG_ID" BIGINT NOT 
NULL, + "NAME" VARCHAR(256) UNIQUE, + "DESC" VARCHAR(4000), + "LOCATION_URI" VARCHAR(4000) NOT NULL, + "CREATE_TIME" INTEGER); + +-- Insert a default value. The location is TBD. Hive will fix this when it starts +INSERT INTO "APP"."CTLGS" ("CTLG_ID", "NAME", "DESC", "LOCATION_URI", "CREATE_TIME") +VALUES (1, 'hive', 'Default catalog for Hive', 'TBD', NULL); + +-- ---------------------------------------------- +-- DML Statements +-- ---------------------------------------------- + +INSERT INTO "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT * FROM (VALUES (1,1)) tmp_table WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "APP"."NOTIFICATION_SEQUENCE"); + +INSERT INTO "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") SELECT * FROM (VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1)) tmp_table WHERE NOT EXISTS ( SELECT "NEXT_VAL" FROM "APP"."SEQUENCE_TABLE" WHERE "SEQUENCE_NAME" = 'org.apache.hadoop.hive.metastore.model.MNotificationLog'); + +-- ---------------------------------------------- +-- DDL Statements for indexes +-- ---------------------------------------------- + +CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("AUTHORIZER", "TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("AUTHORIZER", "DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."DCPRIVILEGEINDEX" ON "APP"."DC_PRIVS" ("AUTHORIZER", "NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DC_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME","CAT_NAME"); + +CREATE INDEX "APP"."TAB_COL_STATS_IDX" ON "APP"."TAB_COL_STATS" ("DB_NAME", "TABLE_NAME", "COLUMN_NAME", "CAT_NAME"); + +CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("AUTHORIZER", "PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."ROLEENTITYINDEX" ON "APP"."ROLES" ("ROLE_NAME"); + +CREATE INDEX "APP"."TABLEPRIVILEGEINDEX" ON "APP"."TBL_PRIVS" ("AUTHORIZER", "TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."UNIQUETABLE" ON "APP"."TBLS" ("TBL_NAME", "DB_ID"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME", "CTLG_NAME"); + +CREATE UNIQUE INDEX "APP"."USERROLEMAPINDEX" ON "APP"."ROLE_MAP" ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."GLOBALPRIVILEGEINDEX" ON "APP"."GLOBAL_PRIVS" ("AUTHORIZER", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_TYPE" ON "APP"."TYPES" ("TYPE_NAME"); + +CREATE INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX" ON "APP"."PART_COL_PRIVS" ("AUTHORIZER", "PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("TBL_ID", "PART_NAME"); + +CREATE UNIQUE INDEX "APP"."UNIQUEFUNCTION" ON "APP"."FUNCS" ("FUNC_NAME", "DB_ID"); + +CREATE INDEX "APP"."FUNCS_N49" ON "APP"."FUNCS" ("DB_ID"); + +CREATE INDEX "APP"."FUNC_RU_N49" ON "APP"."FUNC_RU" ("FUNC_ID"); + +CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID"); + +CREATE INDEX "APP"."CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON 
"APP"."KEY_CONSTRAINTS"("CONSTRAINT_TYPE"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_RESOURCEPLAN" ON "APP"."WM_RESOURCEPLAN" ("NS", "NAME"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_POOL" ON "APP"."WM_POOL" ("RP_ID", "PATH"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_TRIGGER" ON "APP"."WM_TRIGGER" ("RP_ID", "NAME"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_WM_MAPPING" ON "APP"."WM_MAPPING" ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME"); + +CREATE UNIQUE INDEX "APP"."MV_UNIQUE_TABLE" ON "APP"."MV_CREATION_METADATA" ("TBL_NAME", "DB_NAME"); + +CREATE UNIQUE INDEX "APP"."UNIQUE_CATALOG" ON "APP"."CTLGS" ("NAME"); + + +-- ---------------------------------------------- +-- DDL Statements for keys +-- ---------------------------------------------- + +-- primary/unique + +ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_PK" PRIMARY KEY ("TBL_COLUMN_GRANT_ID"); + +ALTER TABLE "APP"."CDS" ADD CONSTRAINT "SQL110922153006460" PRIMARY KEY ("CD_ID"); + +ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_PK" PRIMARY KEY ("DB_GRANT_ID"); + +ALTER TABLE "APP"."DC_PRIVS" ADD CONSTRAINT "DC_PRIVS_PK" PRIMARY KEY ("DC_GRANT_ID"); + +ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEY_PK" PRIMARY KEY ("TBL_ID", "PKEY_NAME"); + +ALTER TABLE "APP"."SEQUENCE_TABLE" ADD CONSTRAINT "SEQUENCE_TABLE_PK" PRIMARY KEY ("SEQUENCE_NAME"); + +ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_PK" PRIMARY KEY ("PART_GRANT_ID"); + +ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_PK" PRIMARY KEY ("SD_ID"); + +ALTER TABLE "APP"."SERDES" ADD CONSTRAINT "SERDES_PK" PRIMARY KEY ("SERDE_ID"); + +ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_PK" PRIMARY KEY ("SD_ID", "COLUMN_NAME"); + +ALTER TABLE "APP"."PARTITION_EVENTS" ADD CONSTRAINT "PARTITION_EVENTS_PK" PRIMARY KEY ("PART_NAME_ID"); + +ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_PK" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME"); + +ALTER TABLE "APP"."ROLES" ADD CONSTRAINT "ROLES_PK" PRIMARY KEY ("ROLE_ID"); + +ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_PK" PRIMARY KEY ("TBL_GRANT_ID"); + +ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_PK" PRIMARY KEY ("SERDE_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."NUCLEUS_TABLES" ADD CONSTRAINT "NUCLEUS_TABLES_PK" PRIMARY KEY ("CLASS_NAME"); + +ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_PK" PRIMARY KEY ("TBL_ID"); + +ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_PK" PRIMARY KEY ("SD_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_PK" PRIMARY KEY ("DB_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_PK" PRIMARY KEY ("DB_ID"); + +ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_PK" PRIMARY KEY ("ROLE_GRANT_ID"); + +ALTER TABLE "APP"."GLOBAL_PRIVS" ADD CONSTRAINT "GLOBAL_PRIVS_PK" PRIMARY KEY ("USER_GRANT_ID"); + +ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_PK" PRIMARY KEY ("PART_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."TYPES" ADD CONSTRAINT "TYPES_PK" PRIMARY KEY ("TYPES_ID"); + +ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "SQL110922153006740" PRIMARY KEY ("CD_ID", "COLUMN_NAME"); + +ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_PK" PRIMARY KEY ("PART_COLUMN_GRANT_ID"); + +ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT 
"PARTITION_PARAMS_PK" PRIMARY KEY ("PART_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_PK" PRIMARY KEY ("PART_ID"); + +ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_PK" PRIMARY KEY ("TBL_ID", "PARAM_KEY"); + +ALTER TABLE "APP"."SKEWED_STRING_LIST" ADD CONSTRAINT "SKEWED_STRING_LIST_PK" PRIMARY KEY ("STRING_LIST_ID"); + +ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_PK" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_PK" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID"); + +ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_PK" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX"); + +ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_PK" PRIMARY KEY ("CS_ID"); + +ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_PK" PRIMARY KEY ("CS_ID"); + +ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_PK" PRIMARY KEY ("FUNC_ID"); + +ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_PK" PRIMARY KEY ("FUNC_ID", "INTEGER_IDX"); + +ALTER TABLE "APP"."NOTIFICATION_LOG" ADD CONSTRAINT "NOTIFICATION_LOG_PK" PRIMARY KEY ("NL_ID"); + +ALTER TABLE "APP"."NOTIFICATION_SEQUENCE" ADD CONSTRAINT "NOTIFICATION_SEQUENCE_PK" PRIMARY KEY ("NNI_ID"); + +ALTER TABLE "APP"."NOTIFICATION_SEQUENCE" ADD CONSTRAINT "ONE_ROW_CONSTRAINT" CHECK (NNI_ID = 1); + +ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY ("PARENT_TBL_ID", "CONSTRAINT_NAME", "POSITION"); + +ALTER TABLE "APP"."METASTORE_DB_PROPERTIES" ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY"); + +ALTER TABLE "APP"."MV_CREATION_METADATA" ADD CONSTRAINT "MV_CREATION_METADATA_PK" PRIMARY KEY ("MV_CREATION_METADATA_ID"); + +ALTER TABLE "APP"."CTLGS" ADD CONSTRAINT "CTLG_PK" PRIMARY KEY ("CTLG_ID"); + + +-- foreign +ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEYS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK2" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_FK1" FOREIGN KEY ("TYPE_NAME") REFERENCES "APP"."TYPES" ("TYPES_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT 
"SERDE_PARAMS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_FK1" FOREIGN KEY ("ROLE_ID") REFERENCES "APP"."ROLES" ("ROLE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "COLUMNS_V2_FK1" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_FK1" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK1" FOREIGN KEY ("SD_ID_OID") REFERENCES "APP"."SDS" 
("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK2" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_FK" FOREIGN KEY ("TBL_ID") REFERENCES TBLS("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_FK" FOREIGN KEY ("PART_ID") REFERENCES PARTITIONS("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."VERSION" ADD CONSTRAINT "VERSION_PK" PRIMARY KEY ("VER_ID"); + +ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "APP"."FUNCS" ("FUNC_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_PK" PRIMARY KEY ("RP_ID"); + +ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID"); + +ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_PK" PRIMARY KEY ("TRIGGER_ID"); + +ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "APP"."WM_TRIGGER" ("TRIGGER_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_PK" PRIMARY KEY ("MAPPING_ID"); + +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_PK" PRIMARY KEY ("TBL_ID", "MV_CREATION_METADATA_ID"); + +ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK1" FOREIGN KEY ("MV_CREATION_METADATA_ID") REFERENCES "APP"."MV_CREATION_METADATA" ("MV_CREATION_METADATA_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION; + +ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_CTLG_FK" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION; + +-- ---------------------------------------------- +-- DDL Statements for checks +-- ---------------------------------------------- + +ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SQL110318025505550" CHECK (IS_COMPRESSED IN ('Y','N')); + +-- ---------------------------- 
+-- Transaction and Lock Tables +-- ---------------------------- +CREATE TABLE TXNS ( + TXN_ID bigint PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, + TXN_STATE char(1) NOT NULL, + TXN_STARTED bigint NOT NULL, + TXN_LAST_HEARTBEAT bigint NOT NULL, + TXN_USER varchar(128) NOT NULL, + TXN_HOST varchar(128) NOT NULL, + TXN_AGENT_INFO varchar(128), + TXN_META_INFO varchar(128), + TXN_HEARTBEAT_COUNT integer, + TXN_TYPE integer +); + +INSERT INTO TXNS (TXN_ID, TXN_STATE, TXN_STARTED, TXN_LAST_HEARTBEAT, TXN_USER, TXN_HOST) + VALUES(0, 'c', 0, 0, '', ''); + +CREATE TABLE TXN_COMPONENTS ( + TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID), + TC_DATABASE varchar(128) NOT NULL, + TC_TABLE varchar(256), + TC_PARTITION varchar(767), + TC_OPERATION_TYPE char(1) NOT NULL, + TC_WRITEID bigint +); + +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); + +CREATE TABLE COMPLETED_TXN_COMPONENTS ( + CTC_TXNID bigint NOT NULL, + CTC_DATABASE varchar(128) NOT NULL, + CTC_TABLE varchar(256), + CTC_PARTITION varchar(767), + CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL, + CTC_WRITEID bigint, + CTC_UPDATE_DELETE char(1) NOT NULL +); + +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +CREATE TABLE TXN_LOCK_TBL ( + TXN_LOCK bigint NOT NULL +); +INSERT INTO TXN_LOCK_TBL VALUES(1); + +CREATE TABLE HIVE_LOCKS ( + HL_LOCK_EXT_ID bigint NOT NULL, + HL_LOCK_INT_ID bigint NOT NULL, + HL_TXNID bigint NOT NULL, + HL_DB varchar(128) NOT NULL, + HL_TABLE varchar(256), + HL_PARTITION varchar(767), + HL_LOCK_STATE char(1) NOT NULL, + HL_LOCK_TYPE char(1) NOT NULL, + HL_LAST_HEARTBEAT bigint NOT NULL, + HL_ACQUIRED_AT bigint, + HL_USER varchar(128) NOT NULL, + HL_HOST varchar(128) NOT NULL, + HL_HEARTBEAT_COUNT integer, + HL_AGENT_INFO varchar(128), + HL_BLOCKEDBY_EXT_ID bigint, + HL_BLOCKEDBY_INT_ID bigint, + PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID) +); + +CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID); + +CREATE TABLE NEXT_LOCK_ID ( + NL_NEXT bigint NOT NULL +); +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE COMPACTION_QUEUE ( + CQ_ID bigint PRIMARY KEY, + CQ_DATABASE varchar(128) NOT NULL, + CQ_TABLE varchar(256) NOT NULL, + CQ_PARTITION varchar(767), + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_TBLPROPERTIES varchar(2048), + CQ_WORKER_ID varchar(128), + CQ_ENQUEUE_TIME bigint, + CQ_START bigint, + CQ_RUN_AS varchar(128), + CQ_HIGHEST_WRITE_ID bigint, + CQ_META_INFO varchar(2048) for bit data, + CQ_HADOOP_JOB_ID varchar(32), + CQ_ERROR_MESSAGE clob, + CQ_NEXT_TXN_ID bigint, + CQ_TXN_ID bigint, + CQ_COMMIT_TIME bigint, + CQ_INITIATOR_ID varchar(128), + CQ_INITIATOR_VERSION varchar(128), + CQ_WORKER_VERSION varchar(128), + CQ_CLEANER_START bigint, + CQ_RETRY_RETENTION bigint NOT NULL DEFAULT 0, + CQ_POOL_NAME varchar(128), + CQ_NUMBER_OF_BUCKETS integer, + CQ_ORDER_BY varchar(4000) +); + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( + NCQ_NEXT bigint NOT NULL +); +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID bigint PRIMARY KEY, + CC_DATABASE varchar(128) NOT NULL, + CC_TABLE varchar(256) NOT NULL, + CC_PARTITION varchar(767), + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_TBLPROPERTIES varchar(2048), + CC_WORKER_ID varchar(128), + CC_ENQUEUE_TIME bigint, + CC_START bigint, + CC_END bigint, + CC_RUN_AS varchar(128), + CC_HIGHEST_WRITE_ID bigint, + CC_META_INFO varchar(2048) for bit data, + CC_HADOOP_JOB_ID varchar(32), + CC_ERROR_MESSAGE clob, + 
CC_NEXT_TXN_ID bigint, + CC_TXN_ID bigint, + CC_COMMIT_TIME bigint, + CC_INITIATOR_ID varchar(128), + CC_INITIATOR_VERSION varchar(128), + CC_WORKER_VERSION varchar(128), + CC_POOL_NAME varchar(128), + CC_NUMBER_OF_BUCKETS integer, + CC_ORDER_BY varchar(4000) +); + +CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_TABLE,CC_PARTITION); + +-- HIVE-25842 +CREATE TABLE COMPACTION_METRICS_CACHE ( + CMC_DATABASE varchar(128) NOT NULL, + CMC_TABLE varchar(256) NOT NULL, + CMC_PARTITION varchar(767), + CMC_METRIC_TYPE varchar(128) NOT NULL, + CMC_METRIC_VALUE integer NOT NULL, + CMC_VERSION integer NOT NULL +); + +CREATE TABLE AUX_TABLE ( + MT_KEY1 varchar(128) NOT NULL, + MT_KEY2 bigint NOT NULL, + MT_COMMENT varchar(255), + PRIMARY KEY(MT_KEY1, MT_KEY2) +); + +--1st 4 cols make up a PK but since WS_PARTITION is nullable we can't declare such PK +--This is a good candidate for Index orgainzed table +CREATE TABLE WRITE_SET ( + WS_DATABASE varchar(128) NOT NULL, + WS_TABLE varchar(256) NOT NULL, + WS_PARTITION varchar(767), + WS_TXNID bigint NOT NULL, + WS_COMMIT_ID bigint NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +); + +CREATE TABLE TXN_TO_WRITE_ID ( + T2W_TXNID bigint NOT NULL, + T2W_DATABASE varchar(128) NOT NULL, + T2W_TABLE varchar(256) NOT NULL, + T2W_WRITEID bigint NOT NULL +); + +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); + +CREATE TABLE NEXT_WRITE_ID ( + NWI_DATABASE varchar(128) NOT NULL, + NWI_TABLE varchar(256) NOT NULL, + NWI_NEXT bigint NOT NULL +); + +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); + +CREATE TABLE MIN_HISTORY_WRITE_ID ( + MH_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID), + MH_DATABASE varchar(128) NOT NULL, + MH_TABLE varchar(256) NOT NULL, + MH_WRITEID bigint NOT NULL +); + +CREATE TABLE MIN_HISTORY_LEVEL ( + MHL_TXNID bigint NOT NULL, + MHL_MIN_OPEN_TXNID bigint NOT NULL, + PRIMARY KEY(MHL_TXNID) +); + +CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID); + +CREATE TABLE MATERIALIZATION_REBUILD_LOCKS ( + MRL_TXN_ID BIGINT NOT NULL, + MRL_DB_NAME VARCHAR(128) NOT NULL, + MRL_TBL_NAME VARCHAR(256) NOT NULL, + MRL_LAST_HEARTBEAT BIGINT NOT NULL, + PRIMARY KEY(MRL_TXN_ID) +); + +CREATE TABLE "APP"."I_SCHEMA" ( + "SCHEMA_ID" bigint primary key, + "SCHEMA_TYPE" integer not null, + "NAME" varchar(256) unique, + "DB_ID" bigint references "APP"."DBS" ("DB_ID"), + "COMPATIBILITY" integer not null, + "VALIDATION_LEVEL" integer not null, + "CAN_EVOLVE" char(1) not null, + "SCHEMA_GROUP" varchar(256), + "DESCRIPTION" varchar(4000) +); + +CREATE TABLE "APP"."SCHEMA_VERSION" ( + "SCHEMA_VERSION_ID" bigint primary key, + "SCHEMA_ID" bigint references "APP"."I_SCHEMA" ("SCHEMA_ID"), + "VERSION" integer not null, + "CREATED_AT" bigint not null, + "CD_ID" bigint references "APP"."CDS" ("CD_ID"), + "STATE" integer not null, + "DESCRIPTION" varchar(4000), + "SCHEMA_TEXT" clob, + "FINGERPRINT" varchar(256), + "SCHEMA_VERSION_NAME" varchar(256), + "SERDE_ID" bigint references "APP"."SERDES" ("SERDE_ID") +); + +CREATE UNIQUE INDEX "APP"."UNIQUE_SCHEMA_VERSION" ON "APP"."SCHEMA_VERSION" ("SCHEMA_ID", "VERSION"); + +CREATE TABLE REPL_TXN_MAP ( + RTM_REPL_POLICY varchar(256) NOT NULL, + RTM_SRC_TXN_ID bigint NOT NULL, + RTM_TARGET_TXN_ID bigint NOT NULL, + PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID) +); + +CREATE TABLE "APP"."RUNTIME_STATS" ( + 
"RS_ID" bigint primary key, + "CREATE_TIME" integer not null, + "WEIGHT" integer not null, + "PAYLOAD" BLOB +); + +CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME); + +CREATE TABLE TXN_WRITE_NOTIFICATION_LOG ( + WNL_ID bigint NOT NULL, + WNL_TXNID bigint NOT NULL, + WNL_WRITEID bigint NOT NULL, + WNL_DATABASE varchar(128) NOT NULL, + WNL_TABLE varchar(256) NOT NULL, + WNL_PARTITION varchar(767) NOT NULL, + WNL_TABLE_OBJ clob NOT NULL, + WNL_PARTITION_OBJ clob, + WNL_FILES clob, + WNL_EVENT_TIME integer NOT NULL, + PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION) +); +INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1); + +CREATE TABLE "APP"."SCHEDULED_QUERIES" ( + "SCHEDULED_QUERY_ID" bigint primary key not null, + "SCHEDULE_NAME" varchar(256) not null, + "ENABLED" CHAR(1) NOT NULL DEFAULT 'N', + "CLUSTER_NAMESPACE" varchar(256) not null, + "USER" varchar(128) not null, + "SCHEDULE" varchar(256) not null, + "QUERY" varchar(4000) not null, + "NEXT_EXECUTION" integer, + "ACTIVE_EXECUTION_ID" bigint +); + +CREATE INDEX NEXTEXECUTIONINDEX ON APP.SCHEDULED_QUERIES (ENABLED,CLUSTER_NAMESPACE,NEXT_EXECUTION); +CREATE UNIQUE INDEX UNIQUE_SCHEDULED_QUERIES_NAME ON APP.SCHEDULED_QUERIES (SCHEDULE_NAME,CLUSTER_NAMESPACE); + +CREATE TABLE APP.SCHEDULED_EXECUTIONS ( + SCHEDULED_EXECUTION_ID bigint primary key not null, + EXECUTOR_QUERY_ID VARCHAR(256), + SCHEDULED_QUERY_ID bigint not null, + START_TIME integer not null, + END_TIME INTEGER, + LAST_UPDATE_TIME INTEGER, + ERROR_MESSAGE VARCHAR(2000), + STATE VARCHAR(256), + CONSTRAINT SCHEDULED_EXECUTIONS_SCHQ_FK FOREIGN KEY (SCHEDULED_QUERY_ID) REFERENCES APP.SCHEDULED_QUERIES(SCHEDULED_QUERY_ID) ON DELETE CASCADE +); + +CREATE INDEX LASTUPDATETIMEINDEX ON APP.SCHEDULED_EXECUTIONS (LAST_UPDATE_TIME); +CREATE INDEX SCHEDULED_EXECUTIONS_SCHQID ON APP.SCHEDULED_EXECUTIONS (SCHEDULED_QUERY_ID); +CREATE UNIQUE INDEX SCHEDULED_EXECUTIONS_UNIQUE_ID ON APP.SCHEDULED_EXECUTIONS (SCHEDULED_EXECUTION_ID); + +--HIVE-23516 +CREATE TABLE "APP"."REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" bigint NOT NULL, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" bigint NOT NULL, + "RM_METADATA" varchar(4000), + "RM_PROGRESS" varchar(10000), + "RM_START_TIME" integer not null, + "MESSAGE_FORMAT" VARCHAR(16) DEFAULT 'json-0.2', + PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID") +); + +CREATE INDEX "POLICY_IDX" ON "APP"."REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "APP"."REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); + +-- Create stored procedure tables +CREATE TABLE "APP"."STORED_PROCS" ( + "SP_ID" BIGINT NOT NULL, + "CREATE_TIME" INTEGER NOT NULL, + "DB_ID" BIGINT NOT NULL, + "NAME" VARCHAR(256) NOT NULL, + "OWNER_NAME" VARCHAR(128) NOT NULL, + "SOURCE" clob NOT NULL, + PRIMARY KEY ("SP_ID") +); + +CREATE UNIQUE INDEX "UNIQUESTOREDPROC" ON "STORED_PROCS" ("NAME", "DB_ID"); +ALTER TABLE "STORED_PROCS" ADD CONSTRAINT "STOREDPROC_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID"); + +CREATE TABLE "APP"."DATACONNECTORS" ("NAME" VARCHAR(128) NOT NULL, "TYPE" VARCHAR(32) NOT NULL, "URL" VARCHAR(4000) NOT NULL, "COMMENT" VARCHAR(256), "OWNER_NAME" VARCHAR(256), "OWNER_TYPE" VARCHAR(10), "CREATE_TIME" INTEGER NOT NULL); +CREATE TABLE "APP"."DATACONNECTOR_PARAMS" ("NAME" VARCHAR(128) NOT NULL, "PARAM_KEY" VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000)); +ALTER TABLE "APP"."DATACONNECTORS" ADD CONSTRAINT "DATACONNECTORS_KEY_PK" 
PRIMARY KEY ("NAME"); +ALTER TABLE "APP"."DATACONNECTOR_PARAMS" ADD CONSTRAINT "DATACONNECTOR_PARAMS_KEY_PK" PRIMARY KEY ("NAME", "PARAM_KEY"); +ALTER TABLE "APP"."DATACONNECTOR_PARAMS" ADD CONSTRAINT "NAME_FK1" FOREIGN KEY ("NAME") REFERENCES "APP"."DATACONNECTORS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION; +ALTER TABLE "APP"."DC_PRIVS" ADD CONSTRAINT "DC_PRIVS_FK1" FOREIGN KEY ("NAME") REFERENCES "APP"."DATACONNECTORS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION; + +-- Create stored procedure packages +CREATE TABLE "APP"."PACKAGES" ( + "PKG_ID" BIGINT NOT NULL, + "CREATE_TIME" INTEGER NOT NULL, + "DB_ID" BIGINT NOT NULL, + "NAME" VARCHAR(256) NOT NULL, + "OWNER_NAME" VARCHAR(128) NOT NULL, + "HEADER" clob NOT NULL, + "BODY" clob NOT NULL, + PRIMARY KEY ("PKG_ID") +); + +CREATE UNIQUE INDEX "UNIQUEPKG" ON "PACKAGES" ("NAME", "DB_ID"); +ALTER TABLE "PACKAGES" ADD CONSTRAINT "PACKAGES_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID"); + +-- ----------------------------------------------------------------- +-- Record schema version. Should be the last step in the init script +-- ----------------------------------------------------------------- +INSERT INTO "APP"."VERSION" (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.1.0', 'Hive release version 4.1.0'); diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.derby.sql index d24722518a23..b17732541070 100644 --- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.derby.sql +++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.derby.sql @@ -31,18 +31,22 @@ ALTER TABLE "APP"."METASTORE_DB_PROPERTIES" ADD PROPERTYCONTENT BLOB; -- HIVE-27457 UPDATE "SDS" - SET "SDS"."INPUT_FORMAT" = 'org.apache.hadoop.hive.kudu.KuduInputFormat', - "SDS"."OUTPUT_FORMAT" = 'org.apache.hadoop.hive.kudu.KuduOutputFormat' - WHERE "SDS"."SD_ID" IN ( - SELECT "TBL_ID" FROM "TABLE_PARAMS" WHERE "PARAM_VALUE" LIKE '%KuduStorageHandler%' + SET "INPUT_FORMAT" = 'org.apache.hadoop.hive.kudu.KuduInputFormat', "OUTPUT_FORMAT" = 'org.apache.hadoop.hive.kudu.KuduOutputFormat' + WHERE "SD_ID" IN ( + SELECT "TBLS"."SD_ID" + FROM "TBLS" + INNER JOIN "TABLE_PARAMS" ON "TBLS"."TBL_ID" = "TABLE_PARAMS"."TBL_ID" + WHERE "TABLE_PARAMS"."PARAM_VALUE" LIKE '%KuduStorageHandler%' ); + UPDATE "SERDES" SET "SERDES"."SLIB" = 'org.apache.hadoop.hive.kudu.KuduSerDe' WHERE "SERDE_ID" IN ( SELECT "SDS"."SERDE_ID" FROM "TBLS" - LEFT JOIN "SDS" ON "TBLS"."SD_ID" = "SDS"."SD_ID" - WHERE "TBL_ID" IN (SELECT "TBL_ID" FROM "TABLE_PARAMS" WHERE "PARAM_VALUE" LIKE '%KuduStorageHandler%') + INNER JOIN "SDS" ON "TBLS"."SD_ID" = "SDS"."SD_ID" + INNER JOIN "TABLE_PARAMS" ON "TBLS"."TBL_ID" = "TABLE_PARAMS"."TBL_ID" + WHERE "TABLE_PARAMS"."PARAM_VALUE" LIKE '%KuduStorageHandler%' ); -- This needs to be the last thing done. Insert any changes above this line. 
diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.derby.sql deleted file mode 100644 index 2695e10742c8..000000000000 --- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.derby.sql +++ /dev/null @@ -1,6 +0,0 @@ --- HIVE-24815 Remove "IDXS" Table from Metastore Schema -DROP TABLE "APP"."INDEX_PARAMS"; -DROP TABLE "APP"."IDXS"; - --- This needs to be the last thing done. Insert any changes above this line. -UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0-beta-2', VERSION_COMMENT='Hive release version 4.0.0-beta-2' where VER_ID=1; diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-beta-1-to-4.0.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-beta-1-to-4.0.0.derby.sql new file mode 100644 index 000000000000..69ce0927bb96 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-beta-1-to-4.0.0.derby.sql @@ -0,0 +1,10 @@ +-- HIVE-24815 Remove "IDXS" Table from Metastore Schema +DROP TABLE "APP"."INDEX_PARAMS"; +DROP TABLE "APP"."IDXS"; + +-- HIVE-27827 +DROP INDEX "APP"."UNIQUEPARTITION"; +CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("TBL_ID", "PART_NAME"); + +-- This needs to be the last thing done. Insert any changes above this line. +UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-to-4.1.0.derby.sql b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-to-4.1.0.derby.sql new file mode 100644 index 000000000000..12028a9b4608 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade-4.0.0-to-4.1.0.derby.sql @@ -0,0 +1,2 @@ +-- This needs to be the last thing done. Insert any changes above this line. 
+UPDATE "APP".VERSION SET SCHEMA_VERSION='4.1.0', VERSION_COMMENT='Hive release version 4.1.0' where VER_ID=1; \ No newline at end of file diff --git a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade.order.derby b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade.order.derby index 809b719e7324..58500aa07e6c 100644 --- a/standalone-metastore/metastore-server/src/main/sql/derby/upgrade.order.derby +++ b/standalone-metastore/metastore-server/src/main/sql/derby/upgrade.order.derby @@ -19,4 +19,5 @@ 3.2.0-to-4.0.0-alpha-1 4.0.0-alpha-1-to-4.0.0-alpha-2 4.0.0-alpha-2-to-4.0.0-beta-1 -4.0.0-beta-1-to-4.0.0-beta-2 +4.0.0-beta-1-to-4.0.0 +4.0.0-to-4.1.0 diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0-beta-2.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql similarity index 99% rename from standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0-beta-2.mssql.sql rename to standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql index 190a245c2b66..7866e87eba1e 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0-beta-2.mssql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql @@ -747,9 +747,7 @@ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFEREN CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID); -CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID); - -CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID); +CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (TBL_ID,PART_NAME); -- Constraints for table CDS for class(es) [org.apache.hadoop.hive.metastore.model.MColumnDescriptor] @@ -1424,4 +1422,4 @@ CREATE INDEX DC_PRIVS_N49 ON DC_PRIVS (NAME); -- ----------------------------------------------------------------- -- Record schema version. Should be the last step in the init script -- ----------------------------------------------------------------- -INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0-beta-2', 'Hive release version 4.0.0-beta-2'); +INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0'); diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.1.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.1.0.mssql.sql new file mode 100644 index 000000000000..c32de734c6c4 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/sql/mssql/hive-schema-4.1.0.mssql.sql @@ -0,0 +1,1425 @@ +-- Licensed to the Apache Software Foundation (ASF) under one or more +-- contributor license agreements. See the NOTICE file distributed with +-- this work for additional information regarding copyright ownership. +-- The ASF licenses this file to You under the Apache License, Version 2.0 +-- (the "License"); you may not use this file except in compliance with +-- the License. You may obtain a copy of the License at +-- +-- http://www.apache.org/licenses/LICENSE-2.0 +-- +-- Unless required by applicable law or agreed to in writing, software +-- distributed under the License is distributed on an "AS IS" BASIS, +-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-- See the License for the specific language governing permissions and +-- limitations under the License. 
+ +------------------------------------------------------------------ +-- DataNucleus SchemaTool (ran at 08/04/2014 15:10:15) +------------------------------------------------------------------ +-- Complete schema required for the following classes:- +-- org.apache.hadoop.hive.metastore.model.MColumnDescriptor +-- org.apache.hadoop.hive.metastore.model.MDBPrivilege +-- org.apache.hadoop.hive.metastore.model.MDatabase +-- org.apache.hadoop.hive.metastore.model.MDelegationToken +-- org.apache.hadoop.hive.metastore.model.MFieldSchema +-- org.apache.hadoop.hive.metastore.model.MFunction +-- org.apache.hadoop.hive.metastore.model.MGlobalPrivilege +-- org.apache.hadoop.hive.metastore.model.MIndex +-- org.apache.hadoop.hive.metastore.model.MMasterKey +-- org.apache.hadoop.hive.metastore.model.MOrder +-- org.apache.hadoop.hive.metastore.model.MPartition +-- org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege +-- org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics +-- org.apache.hadoop.hive.metastore.model.MPartitionEvent +-- org.apache.hadoop.hive.metastore.model.MPartitionPrivilege +-- org.apache.hadoop.hive.metastore.model.MResourceUri +-- org.apache.hadoop.hive.metastore.model.MRole +-- org.apache.hadoop.hive.metastore.model.MRoleMap +-- org.apache.hadoop.hive.metastore.model.MSerDeInfo +-- org.apache.hadoop.hive.metastore.model.MStorageDescriptor +-- org.apache.hadoop.hive.metastore.model.MStringList +-- org.apache.hadoop.hive.metastore.model.MTable +-- org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege +-- org.apache.hadoop.hive.metastore.model.MTableColumnStatistics +-- org.apache.hadoop.hive.metastore.model.MTablePrivilege +-- org.apache.hadoop.hive.metastore.model.MType +-- org.apache.hadoop.hive.metastore.model.MVersionTable +-- +-- Table MASTER_KEYS for classes [org.apache.hadoop.hive.metastore.model.MMasterKey] +CREATE TABLE MASTER_KEYS +( + KEY_ID int NOT NULL, + MASTER_KEY nvarchar(767) NULL +); + +ALTER TABLE MASTER_KEYS ADD CONSTRAINT MASTER_KEYS_PK PRIMARY KEY (KEY_ID); + +-- Table PART_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics] +CREATE TABLE PART_COL_STATS +( + CS_ID bigint NOT NULL, + AVG_COL_LEN float NULL, + "COLUMN_NAME" nvarchar(767) NOT NULL, + COLUMN_TYPE nvarchar(128) NOT NULL, + DB_NAME nvarchar(128) NOT NULL, + BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL, + BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL, + DOUBLE_HIGH_VALUE float NULL, + DOUBLE_LOW_VALUE float NULL, + LAST_ANALYZED bigint NOT NULL, + LONG_HIGH_VALUE bigint NULL, + LONG_LOW_VALUE bigint NULL, + MAX_COL_LEN bigint NULL, + NUM_DISTINCTS bigint NULL, + BIT_VECTOR varbinary(max) NULL, + NUM_FALSES bigint NULL, + NUM_NULLS bigint NOT NULL, + NUM_TRUES bigint NULL, + PART_ID bigint NULL, + PARTITION_NAME nvarchar(767) NOT NULL, + "TABLE_NAME" nvarchar(256) NOT NULL, + "CAT_NAME" nvarchar(256) NOT NULL, + "ENGINE" nvarchar(128) NOT NULL, + HISTOGRAM varbinary(max) NULL +); + +ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID); + +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME, CAT_NAME); + +-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege] +CREATE TABLE PART_PRIVS +( + PART_GRANT_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PART_ID bigint NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + 
PRINCIPAL_TYPE nvarchar(128) NULL, + PART_PRIV nvarchar(128) NULL, + AUTHORIZER nvarchar(128) NULL +); + +ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID); + +-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList] +CREATE TABLE SKEWED_STRING_LIST +( + STRING_LIST_ID bigint NOT NULL +); + +ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID); + +-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole] +CREATE TABLE ROLES +( + ROLE_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + OWNER_NAME nvarchar(128) NULL, + ROLE_NAME nvarchar(128) NULL +); + +ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID); + +-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition] +CREATE TABLE PARTITIONS +( + PART_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + LAST_ACCESS_TIME int NOT NULL, + PART_NAME nvarchar(767) NULL, + SD_ID bigint NULL, + TBL_ID bigint NULL, + WRITE_ID bigint NOT NULL DEFAULT 0); + +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID); + +-- Table CDS for classes [org.apache.hadoop.hive.metastore.model.MColumnDescriptor] +CREATE TABLE CDS +( + CD_ID bigint NOT NULL +); + +ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID); + +-- Table VERSION for classes [org.apache.hadoop.hive.metastore.model.MVersionTable] +CREATE TABLE VERSION +( + VER_ID bigint NOT NULL, + SCHEMA_VERSION nvarchar(127) NOT NULL, + VERSION_COMMENT nvarchar(255) NOT NULL +); + +ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID); + +-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege] +CREATE TABLE GLOBAL_PRIVS +( + USER_GRANT_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + USER_PRIV nvarchar(128) NULL, + AUTHORIZER nvarchar(128) NULL +); + +ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID); + +-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege] +CREATE TABLE PART_COL_PRIVS +( + PART_COLUMN_GRANT_ID bigint NOT NULL, + "COLUMN_NAME" nvarchar(767) NULL, + CREATE_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PART_ID bigint NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + PART_COL_PRIV nvarchar(128) NULL, + AUTHORIZER nvarchar(128) NULL +); + +ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID); + +-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege] +CREATE TABLE DB_PRIVS +( + DB_GRANT_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + DB_ID bigint NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + DB_PRIV nvarchar(128) NULL, + AUTHORIZER nvarchar(128) NULL +); + +ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID); + +-- Table DC_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDCPrivilege] +CREATE TABLE DC_PRIVS +( + DC_GRANT_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + NAME nvarchar(128) 
NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + DC_PRIV nvarchar(128) NULL, + AUTHORIZER nvarchar(128) NULL +); + +ALTER TABLE DC_PRIVS ADD CONSTRAINT DC_PRIVS_PK PRIMARY KEY (DC_GRANT_ID); + + +-- Table TAB_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics] +CREATE TABLE TAB_COL_STATS +( + CS_ID bigint NOT NULL, + AVG_COL_LEN float NULL, + "COLUMN_NAME" nvarchar(767) NOT NULL, + COLUMN_TYPE nvarchar(128) NOT NULL, + DB_NAME nvarchar(128) NOT NULL, + BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL, + BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL, + DOUBLE_HIGH_VALUE float NULL, + DOUBLE_LOW_VALUE float NULL, + LAST_ANALYZED bigint NOT NULL, + LONG_HIGH_VALUE bigint NULL, + LONG_LOW_VALUE bigint NULL, + MAX_COL_LEN bigint NULL, + NUM_DISTINCTS bigint NULL, + BIT_VECTOR varbinary(max) NULL, + NUM_FALSES bigint NULL, + NUM_NULLS bigint NOT NULL, + NUM_TRUES bigint NULL, + TBL_ID bigint NULL, + "TABLE_NAME" nvarchar(256) NOT NULL, + "CAT_NAME" nvarchar(256) NOT NULL, + "ENGINE" nvarchar(128) NOT NULL, + HISTOGRAM varbinary(max) NULL +); + +ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID); +CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (DB_NAME, TABLE_NAME, COLUMN_NAME, CAT_NAME); + +-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType] +CREATE TABLE TYPES +( + TYPES_ID bigint NOT NULL, + TYPE_NAME nvarchar(128) NULL, + TYPE1 nvarchar(767) NULL, + TYPE2 nvarchar(767) NULL +); + +ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID); + +-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege] +CREATE TABLE TBL_PRIVS +( + TBL_GRANT_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + TBL_PRIV nvarchar(128) NULL, + TBL_ID bigint NULL, + AUTHORIZER nvarchar(128) NULL +); + +ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID); + +-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase] +CREATE TABLE DBS +( + DB_ID bigint NOT NULL, + "DESC" nvarchar(4000) NULL, + DB_LOCATION_URI nvarchar(4000) NOT NULL, + "NAME" nvarchar(128) NULL, + OWNER_NAME nvarchar(128) NULL, + OWNER_TYPE nvarchar(10) NULL, + CTLG_NAME nvarchar(256) DEFAULT 'hive', + CREATE_TIME INT, + DB_MANAGED_LOCATION_URI nvarchar(4000) NULL, + TYPE nvarchar(32) DEFAULT 'NATIVE' NOT NULL, + DATACONNECTOR_NAME nvarchar(128) NULL, + REMOTE_DBNAME nvarchar(128) NULL +); + +ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID); + +-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege] +CREATE TABLE TBL_COL_PRIVS +( + TBL_COLUMN_GRANT_ID bigint NOT NULL, + "COLUMN_NAME" nvarchar(767) NULL, + CREATE_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + TBL_COL_PRIV nvarchar(128) NULL, + TBL_ID bigint NULL, + AUTHORIZER nvarchar(128) NULL +); + +ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID); + +-- Table DELEGATION_TOKENS for classes 
[org.apache.hadoop.hive.metastore.model.MDelegationToken] +CREATE TABLE DELEGATION_TOKENS +( + TOKEN_IDENT nvarchar(767) NOT NULL, + TOKEN nvarchar(767) NULL +); + +ALTER TABLE DELEGATION_TOKENS ADD CONSTRAINT DELEGATION_TOKENS_PK PRIMARY KEY (TOKEN_IDENT); + +-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo] +CREATE TABLE SERDES +( + SERDE_ID bigint NOT NULL, + "NAME" nvarchar(128) NULL, + SLIB nvarchar(4000) NULL, + "DESCRIPTION" nvarchar(4000), + "SERIALIZER_CLASS" nvarchar(4000), + "DESERIALIZER_CLASS" nvarchar(4000), + "SERDE_TYPE" int +); + +ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID); + +-- Table FUNCS for classes [org.apache.hadoop.hive.metastore.model.MFunction] +CREATE TABLE FUNCS +( + FUNC_ID bigint NOT NULL, + CLASS_NAME nvarchar(4000) NULL, + CREATE_TIME int NOT NULL, + DB_ID bigint NULL, + FUNC_NAME nvarchar(128) NULL, + FUNC_TYPE int NOT NULL, + OWNER_NAME nvarchar(128) NULL, + OWNER_TYPE nvarchar(10) NULL +); + +ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID); + +-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap] +CREATE TABLE ROLE_MAP +( + ROLE_GRANT_ID bigint NOT NULL, + ADD_TIME int NOT NULL, + GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)), + GRANTOR nvarchar(128) NULL, + GRANTOR_TYPE nvarchar(128) NULL, + PRINCIPAL_NAME nvarchar(128) NULL, + PRINCIPAL_TYPE nvarchar(128) NULL, + ROLE_ID bigint NULL +); + +ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID); + +-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable] +CREATE TABLE TBLS +( + TBL_ID bigint NOT NULL, + CREATE_TIME int NOT NULL, + DB_ID bigint NULL, + LAST_ACCESS_TIME int NOT NULL, + OWNER nvarchar(767) NULL, + OWNER_TYPE nvarchar(10) NULL, + RETENTION int NOT NULL, + SD_ID bigint NULL, + TBL_NAME nvarchar(256) NULL, + TBL_TYPE nvarchar(128) NULL, + VIEW_EXPANDED_TEXT text NULL, + VIEW_ORIGINAL_TEXT text NULL, + IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0, + WRITE_ID bigint NOT NULL DEFAULT 0); + +ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); + +-- Table MV_CREATION_METADATA for classes [org.apache.hadoop.hive.metastore.model.MCreationMetadata] +CREATE TABLE MV_CREATION_METADATA +( + MV_CREATION_METADATA_ID bigint NOT NULL, + CAT_NAME nvarchar(256) NOT NULL, + DB_NAME nvarchar(128) NOT NULL, + TBL_NAME nvarchar(256) NOT NULL, + TXN_LIST text NULL, + MATERIALIZATION_TIME bigint NOT NULL +); + +ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID); +CREATE INDEX MV_UNIQUE_TABLE ON MV_CREATION_METADATA (TBL_NAME,DB_NAME); + + +CREATE TABLE MV_TABLES_USED +( + MV_CREATION_METADATA_ID bigint NOT NULL, + TBL_ID bigint NOT NULL, + INSERTED_COUNT bigint NOT NULL DEFAULT 0, + UPDATED_COUNT bigint NOT NULL DEFAULT 0, + DELETED_COUNT bigint NOT NULL DEFAULT 0 +); + +ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_PK PRIMARY KEY (TBL_ID, MV_CREATION_METADATA_ID); +ALTER TABLE MV_TABLES_USED WITH CHECK ADD FOREIGN KEY(MV_CREATION_METADATA_ID) REFERENCES MV_CREATION_METADATA (MV_CREATION_METADATA_ID); +ALTER TABLE MV_TABLES_USED WITH CHECK ADD FOREIGN KEY(TBL_ID) REFERENCES TBLS (TBL_ID); + +-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor] +CREATE TABLE SDS +( + SD_ID bigint NOT NULL, + CD_ID bigint NULL, + INPUT_FORMAT nvarchar(4000) NULL, + IS_COMPRESSED bit NOT NULL, + IS_STOREDASSUBDIRECTORIES bit NOT NULL, + LOCATION nvarchar(4000) NULL, + NUM_BUCKETS 
int NOT NULL, + OUTPUT_FORMAT nvarchar(4000) NULL, + SERDE_ID bigint NULL +); + +ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID); + +-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent] +CREATE TABLE PARTITION_EVENTS +( + PART_NAME_ID bigint NOT NULL, + CAT_NAME nvarchar(256) NULL, + DB_NAME nvarchar(128) NULL, + EVENT_TIME bigint NOT NULL, + EVENT_TYPE int NOT NULL, + PARTITION_NAME nvarchar(767) NULL, + TBL_NAME nvarchar(256) NULL +); + +ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID); + +-- Table SORT_COLS for join relationship +CREATE TABLE SORT_COLS +( + SD_ID bigint NOT NULL, + "COLUMN_NAME" nvarchar(767) NULL, + "ORDER" int NOT NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +-- Table SKEWED_COL_NAMES for join relationship +CREATE TABLE SKEWED_COL_NAMES +( + SD_ID bigint NOT NULL, + SKEWED_COL_NAME nvarchar(255) NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +-- Table SKEWED_COL_VALUE_LOC_MAP for join relationship +CREATE TABLE SKEWED_COL_VALUE_LOC_MAP +( + SD_ID bigint NOT NULL, + STRING_LIST_ID_KID bigint NOT NULL, + LOCATION nvarchar(4000) NULL +); + +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID); + +-- Table SKEWED_STRING_LIST_VALUES for join relationship +CREATE TABLE SKEWED_STRING_LIST_VALUES +( + STRING_LIST_ID bigint NOT NULL, + STRING_LIST_VALUE nvarchar(255) NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX); + +-- Table PARTITION_KEY_VALS for join relationship +CREATE TABLE PARTITION_KEY_VALS +( + PART_ID bigint NOT NULL, + PART_KEY_VAL nvarchar(255) NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX); + +-- Table PARTITION_KEYS for join relationship +CREATE TABLE PARTITION_KEYS +( + TBL_ID bigint NOT NULL, + PKEY_COMMENT nvarchar(4000) NULL, + PKEY_NAME nvarchar(128) NOT NULL, + PKEY_TYPE nvarchar(767) NOT NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME); + +-- Table SKEWED_VALUES for join relationship +CREATE TABLE SKEWED_VALUES +( + SD_ID_OID bigint NOT NULL, + STRING_LIST_ID_EID bigint NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX); + +-- Table SD_PARAMS for join relationship +CREATE TABLE SD_PARAMS +( + SD_ID bigint NOT NULL, + PARAM_KEY nvarchar(256) NOT NULL, + PARAM_VALUE varchar(max) NULL +); + +ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY); + +-- Table FUNC_RU for join relationship +CREATE TABLE FUNC_RU +( + FUNC_ID bigint NOT NULL, + RESOURCE_TYPE int NOT NULL, + RESOURCE_URI nvarchar(4000) NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID,INTEGER_IDX); + +-- Table TYPE_FIELDS for join relationship +CREATE TABLE TYPE_FIELDS +( + TYPE_NAME bigint NOT NULL, + COMMENT nvarchar(256) NULL, + FIELD_NAME nvarchar(128) NOT NULL, + FIELD_TYPE nvarchar(767) NOT NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME); 
+ +-- Table BUCKETING_COLS for join relationship +CREATE TABLE BUCKETING_COLS +( + SD_ID bigint NOT NULL, + BUCKET_COL_NAME nvarchar(255) NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +-- Table DATABASE_PARAMS for join relationship +CREATE TABLE DATABASE_PARAMS +( + DB_ID bigint NOT NULL, + PARAM_KEY nvarchar(180) NOT NULL, + PARAM_VALUE nvarchar(4000) NULL +); + +ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY); + +-- Table COLUMNS_V2 for join relationship +CREATE TABLE COLUMNS_V2 +( + CD_ID bigint NOT NULL, + COMMENT nvarchar(4000) NULL, + "COLUMN_NAME" nvarchar(767) NOT NULL, + TYPE_NAME varchar(max) NOT NULL, + INTEGER_IDX int NOT NULL +); + +ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_PK PRIMARY KEY (CD_ID,"COLUMN_NAME"); + +-- Table SERDE_PARAMS for join relationship +CREATE TABLE SERDE_PARAMS +( + SERDE_ID bigint NOT NULL, + PARAM_KEY nvarchar(256) NOT NULL, + PARAM_VALUE varchar(max) NULL +); + +ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY); + +-- Table PARTITION_PARAMS for join relationship +CREATE TABLE PARTITION_PARAMS +( + PART_ID bigint NOT NULL, + PARAM_KEY nvarchar(256) NOT NULL, + PARAM_VALUE varchar(max) NULL +); + +ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY); + +-- Table TABLE_PARAMS for join relationship +CREATE TABLE TABLE_PARAMS +( + TBL_ID bigint NOT NULL, + PARAM_KEY nvarchar(256) NOT NULL, + PARAM_VALUE varchar(max) NULL +); + +ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY); + +CREATE TABLE NOTIFICATION_LOG +( + NL_ID bigint NOT NULL, + EVENT_ID bigint NOT NULL, + EVENT_TIME int NOT NULL, + EVENT_TYPE nvarchar(32) NOT NULL, + CAT_NAME nvarchar(128) NULL, + DB_NAME nvarchar(128) NULL, + TBL_NAME nvarchar(256) NULL, + MESSAGE_FORMAT nvarchar(16), + MESSAGE text NULL +); + +ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID); + +CREATE UNIQUE INDEX NOTIFICATION_LOG_EVENT_ID ON NOTIFICATION_LOG (EVENT_ID); + +CREATE TABLE NOTIFICATION_SEQUENCE +( + NNI_ID bigint NOT NULL, + NEXT_EVENT_ID bigint NOT NULL +); + +ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID); + +ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT ONE_ROW_CONSTRAINT CHECK (NNI_ID = 1); + +INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 WHERE NOT EXISTS (SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE); + +-- Tables to manage resource plans. 
+ +CREATE TABLE WM_RESOURCEPLAN +( + RP_ID bigint NOT NULL, + "NAME" nvarchar(128) NOT NULL, + NS nvarchar(128), + QUERY_PARALLELISM int, + STATUS nvarchar(20) NOT NULL, + DEFAULT_POOL_ID bigint +); + +ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID); + +CREATE TABLE WM_POOL +( + POOL_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + PATH nvarchar(1024) NOT NULL, + ALLOC_FRACTION float, + QUERY_PARALLELISM int, + SCHEDULING_POLICY nvarchar(1024) +); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID); + +CREATE TABLE WM_TRIGGER +( + TRIGGER_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + "NAME" nvarchar(128) NOT NULL, + TRIGGER_EXPRESSION nvarchar(1024), + ACTION_EXPRESSION nvarchar(1024), + IS_IN_UNMANAGED bit NOT NULL DEFAULT 0 +); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID); + +CREATE TABLE WM_POOL_TO_TRIGGER +( + POOL_ID bigint NOT NULL, + TRIGGER_ID bigint NOT NULL +); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID); + +CREATE TABLE WM_MAPPING +( + MAPPING_ID bigint NOT NULL, + RP_ID bigint NOT NULL, + ENTITY_TYPE nvarchar(128) NOT NULL, + ENTITY_NAME nvarchar(128) NOT NULL, + POOL_ID bigint, + ORDERING int +); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID); + +CREATE TABLE CTLGS ( + CTLG_ID bigint primary key, + "NAME" nvarchar(256), + "DESC" nvarchar(4000), + LOCATION_URI nvarchar(4000) not null, + CREATE_TIME INT +); + +-- Insert a default value. The location is TBD. Hive will fix this when it starts +INSERT INTO CTLGS VALUES (1, 'hive', 'Default catalog for Hive', 'TBD', NULL); + +CREATE UNIQUE INDEX UNIQUE_CTLG ON CTLGS ("NAME"); + +-- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey] + +-- Constraints for table PART_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics] +ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ; + +CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID); + + +-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege] +ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ; + +CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (AUTHORIZER,PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID); + + +-- Constraints for table SKEWED_STRING_LIST for class(es) [org.apache.hadoop.hive.metastore.model.MStringList] + +-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole] +CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME); + + +-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition] +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID); + +CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (TBL_ID,PART_NAME); + + +-- Constraints for table CDS for class(es) [org.apache.hadoop.hive.metastore.model.MColumnDescriptor] + +-- Constraints for table VERSION for class(es) [org.apache.hadoop.hive.metastore.model.MVersionTable] + +-- Constraints for table GLOBAL_PRIVS for class(es) 
[org.apache.hadoop.hive.metastore.model.MGlobalPrivilege] +CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (AUTHORIZER,PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege] +ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ; + +CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID); + +CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (AUTHORIZER,PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege] +ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ; + +CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (AUTHORIZER,DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID); + + +-- Constraints for table TAB_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics] +ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS (TBL_ID); + + +-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType] +CREATE UNIQUE INDEX UNIQUETYPE ON TYPES (TYPE_NAME); + + +-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege] +ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID); + +CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (AUTHORIZER,TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase] +CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME", "CTLG_NAME"); + + +-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege] +ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (AUTHORIZER,TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID); + + +-- Constraints for table DELEGATION_TOKENS for class(es) [org.apache.hadoop.hive.metastore.model.MDelegationToken] + +-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo] + +-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunction] +ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ; + +CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME,DB_ID); + +CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID); + + +-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap] +ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) ; + +CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID); + +CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable] +ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; 
+ +ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ; + +CREATE INDEX TBLS_N50 ON TBLS (SD_ID); + +CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID); + +CREATE INDEX TBLS_N49 ON TBLS (DB_ID); + + +-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor] +ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ; + +ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ; + +CREATE INDEX SDS_N50 ON SDS (CD_ID); + +CREATE INDEX SDS_N49 ON SDS (SERDE_ID); + + +-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent] +CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME); + + +-- Constraints for table SORT_COLS +ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID); + + +-- Constraints for table SKEWED_COL_NAMES +ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +CREATE INDEX SKEWED_COL_NAMES_N49 ON SKEWED_COL_NAMES (SD_ID); + + +-- Constraints for table SKEWED_COL_VALUE_LOC_MAP +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ; + +CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N50 ON SKEWED_COL_VALUE_LOC_MAP (STRING_LIST_ID_KID); + +CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N49 ON SKEWED_COL_VALUE_LOC_MAP (SD_ID); + + +-- Constraints for table SKEWED_STRING_LIST_VALUES +ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ; + +CREATE INDEX SKEWED_STRING_LIST_VALUES_N49 ON SKEWED_STRING_LIST_VALUES (STRING_LIST_ID); + + +-- Constraints for table PARTITION_KEY_VALS +ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ; + +CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID); + + +-- Constraints for table PARTITION_KEYS +ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID); + + +-- Constraints for table SKEWED_VALUES +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) ; + +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ; + +CREATE INDEX SKEWED_VALUES_N50 ON SKEWED_VALUES (SD_ID_OID); + +CREATE INDEX SKEWED_VALUES_N49 ON SKEWED_VALUES (STRING_LIST_ID_EID); + + +-- Constraints for table SD_PARAMS +ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID); + + +-- Constraints for table FUNC_RU +ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) ; + +CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID); + + +-- Constraints for table TYPE_FIELDS +ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) ; + +CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME); + + +-- Constraints 
for table BUCKETING_COLS +ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ; + +CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID); + + +-- Constraints for table DATABASE_PARAMS +ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ; + +CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID); + +-- Constraints for table COLUMNS_V2 +ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ; + +CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID); + + +-- Constraints for table SERDE_PARAMS +ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ; + +CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID); + + +-- Constraints for table PARTITION_PARAMS +ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ; + +CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID); + + +-- Constraints for table TABLE_PARAMS +ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ; + +CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID); + +-- Constraints for resource plan tables. + +CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NS", "NAME"); + +CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH); + +ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME"); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE DBS ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES CTLGS ("NAME"); +-- ----------------------------------------------------------------------------------------------------------------------------------------------- +-- Transaction and Lock Tables +-- These are not part of package jdo, so if you are going to regenerate this file you need to manually add the following section back to the file. 
+-- ----------------------------------------------------------------------------------------------------------------------------------------------- +CREATE TABLE COMPACTION_QUEUE( + CQ_ID bigint NOT NULL, + CQ_DATABASE nvarchar(128) NOT NULL, + CQ_TABLE nvarchar(256) NOT NULL, + CQ_PARTITION nvarchar(767) NULL, + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_TBLPROPERTIES nvarchar(2048) NULL, + CQ_WORKER_ID nvarchar(128) NULL, + CQ_ENQUEUE_TIME bigint NULL, + CQ_START bigint NULL, + CQ_RUN_AS nvarchar(128) NULL, + CQ_HIGHEST_WRITE_ID bigint NULL, + CQ_META_INFO varbinary(2048) NULL, + CQ_HADOOP_JOB_ID nvarchar(128) NULL, + CQ_ERROR_MESSAGE varchar(max) NULL, + CQ_NEXT_TXN_ID bigint NULL, + CQ_TXN_ID bigint NULL, + CQ_COMMIT_TIME bigint NULL, + CQ_INITIATOR_ID nvarchar(128) NULL, + CQ_INITIATOR_VERSION nvarchar(128) NULL, + CQ_WORKER_VERSION nvarchar(128) NULL, + CQ_CLEANER_START bigint NULL, + CQ_RETRY_RETENTION bigint NOT NULL DEFAULT 0, + CQ_POOL_NAME nvarchar(128) NULL, + CQ_NUMBER_OF_BUCKETS integer, + CQ_ORDER_BY varchar(4000), +PRIMARY KEY CLUSTERED +( + CQ_ID ASC +) +); + +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID bigint NOT NULL, + CC_DATABASE nvarchar(128) NOT NULL, + CC_TABLE nvarchar(256) NOT NULL, + CC_PARTITION nvarchar(767) NULL, + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_TBLPROPERTIES nvarchar(2048) NULL, + CC_WORKER_ID nvarchar(128) NULL, + CC_ENQUEUE_TIME bigint NULL, + CC_START bigint NULL, + CC_END bigint NULL, + CC_RUN_AS nvarchar(128) NULL, + CC_HIGHEST_WRITE_ID bigint NULL, + CC_META_INFO varbinary(2048) NULL, + CC_HADOOP_JOB_ID nvarchar(128) NULL, + CC_ERROR_MESSAGE varchar(max) NULL, + CC_NEXT_TXN_ID bigint NULL, + CC_TXN_ID bigint NULL, + CC_COMMIT_TIME bigint NULL, + CC_INITIATOR_ID nvarchar(128) NULL, + CC_INITIATOR_VERSION nvarchar(128) NULL, + CC_WORKER_VERSION nvarchar(128) NULL, + CC_POOL_NAME nvarchar(128) NULL, + CC_NUMBER_OF_BUCKETS integer, + CC_ORDER_BY varchar(4000), +PRIMARY KEY CLUSTERED +( + CC_ID ASC +) +); + +CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_TABLE,CC_PARTITION); + +-- HIVE-25842 +CREATE TABLE COMPACTION_METRICS_CACHE ( + CMC_DATABASE nvarchar(128) NOT NULL, + CMC_TABLE nvarchar(256) NOT NULL, + CMC_PARTITION nvarchar(767) NULL, + CMC_METRIC_TYPE nvarchar(128) NOT NULL, + CMC_METRIC_VALUE int NOT NULL, + CMC_VERSION int NOT NULL +); + +CREATE TABLE COMPLETED_TXN_COMPONENTS( + CTC_TXNID bigint NOT NULL, + CTC_DATABASE nvarchar(128) NOT NULL, + CTC_TABLE nvarchar(256) NULL, + CTC_PARTITION nvarchar(767) NULL, + CTC_TIMESTAMP datetime2 DEFAULT CURRENT_TIMESTAMP NOT NULL, + CTC_WRITEID bigint, + CTC_UPDATE_DELETE char(1) NOT NULL +); + + +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +CREATE TABLE HIVE_LOCKS( + HL_LOCK_EXT_ID bigint NOT NULL, + HL_LOCK_INT_ID bigint NOT NULL, + HL_TXNID bigint NOT NULL, + HL_DB nvarchar(128) NOT NULL, + HL_TABLE nvarchar(256) NULL, + HL_PARTITION nvarchar(767) NULL, + HL_LOCK_STATE char(1) NOT NULL, + HL_LOCK_TYPE char(1) NOT NULL, + HL_LAST_HEARTBEAT bigint NOT NULL, + HL_ACQUIRED_AT bigint NULL, + HL_USER nvarchar(128) NOT NULL, + HL_HOST nvarchar(128) NOT NULL, + HL_HEARTBEAT_COUNT int NULL, + HL_AGENT_INFO nvarchar(128) NULL, + HL_BLOCKEDBY_EXT_ID bigint NULL, + HL_BLOCKEDBY_INT_ID bigint NULL, +PRIMARY KEY CLUSTERED +( + HL_LOCK_EXT_ID ASC, + HL_LOCK_INT_ID ASC +) +); + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID( + NCQ_NEXT bigint NOT NULL +); + +INSERT INTO 
NEXT_COMPACTION_QUEUE_ID VALUES(1); + +CREATE TABLE NEXT_LOCK_ID( + NL_NEXT bigint NOT NULL +); + +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE TXN_LOCK_TBL( + TXN_LOCK bigint NOT NULL +); + +INSERT INTO TXN_LOCK_TBL VALUES(1); + +CREATE TABLE TXNS( + TXN_ID bigint NOT NULL IDENTITY(1,1), + TXN_STATE char(1) NOT NULL, + TXN_STARTED bigint NOT NULL, + TXN_LAST_HEARTBEAT bigint NOT NULL, + TXN_USER nvarchar(128) NOT NULL, + TXN_HOST nvarchar(128) NOT NULL, + TXN_AGENT_INFO nvarchar(128) NULL, + TXN_META_INFO nvarchar(128) NULL, + TXN_HEARTBEAT_COUNT int NULL, + TXN_TYPE int NULL, +PRIMARY KEY CLUSTERED +( + TXN_ID ASC +) +); +SET IDENTITY_INSERT TXNS ON; +INSERT INTO TXNS (TXN_ID, TXN_STATE, TXN_STARTED, TXN_LAST_HEARTBEAT, TXN_USER, TXN_HOST) + VALUES(0, 'c', 0, 0, '', ''); + +CREATE TABLE TXN_COMPONENTS( + TC_TXNID bigint NOT NULL, + TC_DATABASE nvarchar(128) NOT NULL, + TC_TABLE nvarchar(256) NULL, + TC_PARTITION nvarchar(767) NULL, + TC_OPERATION_TYPE char(1) NOT NULL, + TC_WRITEID bigint +); + +ALTER TABLE TXN_COMPONENTS WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID); + +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); + +CREATE TABLE AUX_TABLE ( + MT_KEY1 nvarchar(128) NOT NULL, + MT_KEY2 bigint NOT NULL, + MT_COMMENT nvarchar(255) NULL, + PRIMARY KEY CLUSTERED +( + MT_KEY1 ASC, + MT_KEY2 ASC +) +); + +CREATE TABLE KEY_CONSTRAINTS +( + CHILD_CD_ID BIGINT, + CHILD_INTEGER_IDX INT, + CHILD_TBL_ID BIGINT, + PARENT_CD_ID BIGINT, + PARENT_INTEGER_IDX INT NOT NULL, + PARENT_TBL_ID BIGINT NOT NULL, + POSITION INT NOT NULL, + CONSTRAINT_NAME VARCHAR(400) NOT NULL, + CONSTRAINT_TYPE SMALLINT NOT NULL, + UPDATE_RULE SMALLINT, + DELETE_RULE SMALLINT, + ENABLE_VALIDATE_RELY SMALLINT NOT NULL, + DEFAULT_VALUE VARCHAR(400) +); + +ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (PARENT_TBL_ID, CONSTRAINT_NAME, POSITION); + +CREATE INDEX CONSTRAINTS_PARENT_TBL_ID__INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID); + +CREATE INDEX CONSTRAINTS_CONSTRAINT_TYPE_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE); + +CREATE TABLE WRITE_SET ( + WS_DATABASE nvarchar(128) NOT NULL, + WS_TABLE nvarchar(256) NOT NULL, + WS_PARTITION nvarchar(767), + WS_TXNID bigint NOT NULL, + WS_COMMIT_ID bigint NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +); + +CREATE TABLE METASTORE_DB_PROPERTIES ( + PROPERTY_KEY VARCHAR(255) NOT NULL, + PROPERTY_VALUE VARCHAR(1000) NOT NULL, + DESCRIPTION VARCHAR(1000), + PROPERTYCONTENT varbinary(max) +); + +ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY); + +CREATE TABLE TXN_TO_WRITE_ID ( + T2W_TXNID bigint NOT NULL, + T2W_DATABASE nvarchar(128) NOT NULL, + T2W_TABLE nvarchar(256) NOT NULL, + T2W_WRITEID bigint NOT NULL +); + +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); + +CREATE TABLE NEXT_WRITE_ID ( + NWI_DATABASE nvarchar(128) NOT NULL, + NWI_TABLE nvarchar(256) NOT NULL, + NWI_NEXT bigint NOT NULL +); + +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); + +CREATE TABLE MIN_HISTORY_WRITE_ID ( + MH_TXNID bigint NOT NULL, + MH_DATABASE nvarchar(128) NOT NULL, + MH_TABLE nvarchar(256) NOT NULL, + MH_WRITEID bigint NOT NULL +); + +CREATE TABLE MIN_HISTORY_LEVEL ( + MHL_TXNID bigint NOT NULL, + MHL_MIN_OPEN_TXNID bigint NOT NULL, +PRIMARY KEY CLUSTERED +( + MHL_TXNID ASC +) +); + +CREATE INDEX MIN_HISTORY_LEVEL_IDX ON 
MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID); + +CREATE TABLE MATERIALIZATION_REBUILD_LOCKS ( + MRL_TXN_ID bigint NOT NULL, + MRL_DB_NAME nvarchar(128) NOT NULL, + MRL_TBL_NAME nvarchar(256) NOT NULL, + MRL_LAST_HEARTBEAT bigint NOT NULL, +PRIMARY KEY CLUSTERED +( + MRL_TXN_ID ASC +) +); + +CREATE TABLE "I_SCHEMA" ( + "SCHEMA_ID" bigint primary key, + "SCHEMA_TYPE" int not null, + "NAME" nvarchar(256) unique, + "DB_ID" bigint references "DBS" ("DB_ID"), + "COMPATIBILITY" int not null, + "VALIDATION_LEVEL" int not null, + "CAN_EVOLVE" bit not null, + "SCHEMA_GROUP" nvarchar(256), + "DESCRIPTION" nvarchar(4000), +); + +CREATE TABLE "SCHEMA_VERSION" ( + "SCHEMA_VERSION_ID" bigint primary key, + "SCHEMA_ID" bigint references "I_SCHEMA" ("SCHEMA_ID"), + "VERSION" int not null, + "CREATED_AT" bigint not null, + "CD_ID" bigint references "CDS" ("CD_ID"), + "STATE" int not null, + "DESCRIPTION" nvarchar(4000), + "SCHEMA_TEXT" varchar(max), + "FINGERPRINT" nvarchar(256), + "SCHEMA_VERSION_NAME" nvarchar(256), + "SERDE_ID" bigint references "SERDES" ("SERDE_ID"), + unique ("SCHEMA_ID", "VERSION") +); + +CREATE TABLE REPL_TXN_MAP ( + RTM_REPL_POLICY nvarchar(256) NOT NULL, + RTM_SRC_TXN_ID bigint NOT NULL, + RTM_TARGET_TXN_ID bigint NOT NULL +); + +ALTER TABLE REPL_TXN_MAP ADD CONSTRAINT REPL_TXN_MAP_PK PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID); + +-- Table SEQUENCE_TABLE is an internal table required by DataNucleus. +-- NOTE: Some versions of SchemaTool do not automatically generate this table. +-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416 +CREATE TABLE SEQUENCE_TABLE +( + SEQUENCE_NAME nvarchar(256) NOT NULL, + NEXT_VAL bigint NOT NULL +); + +CREATE UNIQUE INDEX PART_TABLE_PK ON SEQUENCE_TABLE (SEQUENCE_NAME); + +INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1); + +CREATE TABLE RUNTIME_STATS ( + RS_ID bigint primary key, + CREATE_TIME bigint NOT NULL, + WEIGHT bigint NOT NULL, + PAYLOAD varbinary(max) +); + +CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME); + +CREATE TABLE TXN_WRITE_NOTIFICATION_LOG ( + WNL_ID bigint NOT NULL, + WNL_TXNID bigint NOT NULL, + WNL_WRITEID bigint NOT NULL, + WNL_DATABASE nvarchar(128) NOT NULL, + WNL_TABLE nvarchar(256) NOT NULL, + WNL_PARTITION nvarchar(767) NOT NULL, + WNL_TABLE_OBJ text NOT NULL, + WNL_PARTITION_OBJ text, + WNL_FILES text, + WNL_EVENT_TIME int NOT NULL +); + +ALTER TABLE TXN_WRITE_NOTIFICATION_LOG ADD CONSTRAINT TXN_WRITE_NOTIFICATION_LOG_PK PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION); + +INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1); + +CREATE TABLE "SCHEDULED_QUERIES" ( + "SCHEDULED_QUERY_ID" bigint NOT NULL, + "CLUSTER_NAMESPACE" VARCHAR(256), + "ENABLED" bit NOT NULL DEFAULT 0, + "NEXT_EXECUTION" INTEGER, + "QUERY" VARCHAR(4000), + "SCHEDULE" VARCHAR(256), + "SCHEDULE_NAME" VARCHAR(256), + "USER" VARCHAR(256), + "ACTIVE_EXECUTION_ID" bigint, + CONSTRAINT SCHEDULED_QUERIES_PK PRIMARY KEY ("SCHEDULED_QUERY_ID") +); + +CREATE TABLE "SCHEDULED_EXECUTIONS" ( + "SCHEDULED_EXECUTION_ID" bigint NOT NULL, + "END_TIME" INTEGER, + "ERROR_MESSAGE" VARCHAR(2000), + "EXECUTOR_QUERY_ID" VARCHAR(256), + "LAST_UPDATE_TIME" INTEGER, + "SCHEDULED_QUERY_ID" bigint, + "START_TIME" INTEGER, + "STATE" VARCHAR(256), + CONSTRAINT SCHEDULED_EXECUTIONS_PK PRIMARY KEY ("SCHEDULED_EXECUTION_ID"), + CONSTRAINT SCHEDULED_EXECUTIONS_SCHQ_FK FOREIGN KEY 
("SCHEDULED_QUERY_ID") REFERENCES "SCHEDULED_QUERIES"("SCHEDULED_QUERY_ID") ON DELETE CASCADE +); + +CREATE INDEX IDX_SCHEDULED_EX_LAST_UPDATE ON "SCHEDULED_EXECUTIONS" ("LAST_UPDATE_TIME"); +CREATE INDEX IDX_SCHEDULED_EX_SQ_ID ON "SCHEDULED_EXECUTIONS" ("SCHEDULED_QUERY_ID"); + +-- HIVE-23516 +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" bigint PRIMARY KEY, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" bigint NOT NULL, + "RM_METADATA" varchar(max), + "RM_PROGRESS" varchar(max), + "RM_START_TIME" integer NOT NULL, + "MESSAGE_FORMAT" nvarchar(16) DEFAULT 'json-0.2', +); + +-- Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); + +-- Create stored procedure tables +CREATE TABLE "STORED_PROCS" ( + "SP_ID" BIGINT NOT NULL, + "CREATE_TIME" int NOT NULL, + "DB_ID" BIGINT NOT NULL, + "NAME" nvarchar(256) NOT NULL, + "OWNER_NAME" nvarchar(128) NOT NULL, + "SOURCE" NTEXT NOT NULL, + PRIMARY KEY ("SP_ID") +); + +CREATE UNIQUE INDEX "UNIQUESTOREDPROC" ON "STORED_PROCS" ("NAME", "DB_ID"); +ALTER TABLE "STORED_PROCS" ADD CONSTRAINT "STOREDPROC_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID"); + + +-- Create stored procedure packages +CREATE TABLE "PACKAGES" ( + "PKG_ID" BIGINT NOT NULL, + "CREATE_TIME" int NOT NULL, + "DB_ID" BIGINT NOT NULL, + "NAME" nvarchar(256) NOT NULL, + "OWNER_NAME" nvarchar(128) NOT NULL, + "HEADER" NTEXT NOT NULL, + "BODY" NTEXT NOT NULL, + PRIMARY KEY ("PKG_ID") +); + +CREATE UNIQUE INDEX "UNIQUEPKG" ON "PACKAGES" ("NAME", "DB_ID"); +ALTER TABLE "PACKAGES" ADD CONSTRAINT "PACKAGES_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID"); + +-- HIVE-24396 +-- Create DataConnectors and DataConnector_Params tables +CREATE TABLE "DATACONNECTORS" ( + "NAME" nvarchar(128) NOT NULL, + "TYPE" nvarchar(32) NOT NULL, + "URL" nvarchar(4000) NOT NULL, + "COMMENT" nvarchar(256), + "OWNER_NAME" nvarchar(256), + "OWNER_TYPE" nvarchar(10), + "CREATE_TIME" int NOT NULL, + PRIMARY KEY ("NAME") +); + +CREATE TABLE "DATACONNECTOR_PARAMS"( + "NAME" nvarchar(128) NOT NULL, + "PARAM_KEY" nvarchar(180) NOT NULL, + "PARAM_VALUE" nvarchar(4000), + PRIMARY KEY ("NAME", "PARAM_KEY"), + CONSTRAINT DATACONNECTOR_NAME_FK1 FOREIGN KEY ("NAME") REFERENCES "DATACONNECTORS" ("NAME") ON DELETE CASCADE +); + +-- Constraints for table DC_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDCPrivilege] +ALTER TABLE DC_PRIVS ADD CONSTRAINT DC_PRIVS_FK1 FOREIGN KEY (NAME) REFERENCES DATACONNECTORS (NAME) ; + +CREATE UNIQUE INDEX DCPRIVILEGEINDEX ON DC_PRIVS (AUTHORIZER,NAME,PRINCIPAL_NAME,PRINCIPAL_TYPE,DC_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX DC_PRIVS_N49 ON DC_PRIVS (NAME); + +-- ----------------------------------------------------------------- +-- Record schema version. 
Should be the last step in the init script +-- ----------------------------------------------------------------- +INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.1.0', 'Hive release version 4.1.0'); diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.mssql.sql index 5712d24847b7..e4253ac1ff94 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.mssql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.mssql.sql @@ -31,18 +31,22 @@ ALTER TABLE METASTORE_DB_PROPERTIES ADD PROPERTYCONTENT varbinary(max); -- HIVE-27457 UPDATE "SDS" - SET "SDS"."INPUT_FORMAT" = 'org.apache.hadoop.hive.kudu.KuduInputFormat', - "SDS"."OUTPUT_FORMAT" = 'org.apache.hadoop.hive.kudu.KuduOutputFormat' - WHERE "SDS"."SD_ID" IN ( - SELECT "TBL_ID" FROM "TABLE_PARAMS" WHERE "PARAM_VALUE" LIKE '%KuduStorageHandler%' + SET "INPUT_FORMAT" = 'org.apache.hadoop.hive.kudu.KuduInputFormat', "OUTPUT_FORMAT" = 'org.apache.hadoop.hive.kudu.KuduOutputFormat' + WHERE "SD_ID" IN ( + SELECT "TBLS"."SD_ID" + FROM "TBLS" + INNER JOIN "TABLE_PARAMS" ON "TBLS"."TBL_ID" = "TABLE_PARAMS"."TBL_ID" + WHERE "TABLE_PARAMS"."PARAM_VALUE" LIKE '%KuduStorageHandler%' ); + UPDATE "SERDES" SET "SERDES"."SLIB" = 'org.apache.hadoop.hive.kudu.KuduSerDe' WHERE "SERDE_ID" IN ( SELECT "SDS"."SERDE_ID" FROM "TBLS" - LEFT JOIN "SDS" ON "TBLS"."SD_ID" = "SDS"."SD_ID" - WHERE "TBL_ID" IN (SELECT "TBL_ID" FROM "TABLE_PARAMS" WHERE "PARAM_VALUE" LIKE '%KuduStorageHandler%') + INNER JOIN "SDS" ON "TBLS"."SD_ID" = "SDS"."SD_ID" + INNER JOIN "TABLE_PARAMS" ON "TBLS"."TBL_ID" = "TABLE_PARAMS"."TBL_ID" + WHERE "TABLE_PARAMS"."PARAM_VALUE" LIKE '%KuduStorageHandler%' ); -- These lines need to be last. Insert any changes above. diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.mssql.sql deleted file mode 100644 index a7afd05ceb3a..000000000000 --- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.mssql.sql +++ /dev/null @@ -1,9 +0,0 @@ -SELECT 'Upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0-beta-2' AS MESSAGE; - --- HIVE-24815: Remove "IDXS" Table from Metastore Schema -DROP TABLE INDEX_PARAMS; -DROP TABLE IDXS; - --- These lines need to be last. Insert any changes above. 
-UPDATE VERSION SET SCHEMA_VERSION='4.0.0-beta-2', VERSION_COMMENT='Hive release version 4.0.0-beta-2' where VER_ID=1; -SELECT 'Finished upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0-beta-2' AS MESSAGE; diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-beta-1-to-4.0.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-beta-1-to-4.0.0.mssql.sql new file mode 100644 index 000000000000..6bb3a37e9384 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-beta-1-to-4.0.0.mssql.sql @@ -0,0 +1,14 @@ +SELECT 'Upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0' AS MESSAGE; + +-- HIVE-24815: Remove "IDXS" Table from Metastore Schema +DROP TABLE INDEX_PARAMS; +DROP TABLE IDXS; + +-- HIVE-27827 +DROP INDEX UNIQUEPARTITION ON PARTITIONS; +CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (TBL_ID,PART_NAME); +DROP INDEX PARTITIONS_N50 ON PARTITIONS; + +-- These lines need to be last. Insert any changes above. +UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0' AS MESSAGE; diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-to-4.1.0.mssql.sql b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-to-4.1.0.mssql.sql new file mode 100644 index 000000000000..723f5642dd22 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade-4.0.0-to-4.1.0.mssql.sql @@ -0,0 +1,5 @@ +SELECT 'Upgrading MetaStore schema from 4.0.0 to 4.1.0' AS MESSAGE; + +-- These lines need to be last. Insert any changes above. +UPDATE VERSION SET SCHEMA_VERSION='4.1.0', VERSION_COMMENT='Hive release version 4.1.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 4.0.0 to 4.1.0' AS MESSAGE; diff --git a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade.order.mssql b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade.order.mssql index c12e1d9ed7cc..77b5f2ad2a49 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade.order.mssql +++ b/standalone-metastore/metastore-server/src/main/sql/mssql/upgrade.order.mssql @@ -13,4 +13,5 @@ 3.2.0-to-4.0.0-alpha-1 4.0.0-alpha-1-to-4.0.0-alpha-2 4.0.0-alpha-2-to-4.0.0-beta-1 -4.0.0-beta-1-to-4.0.0-beta-2 +4.0.0-beta-1-to-4.0.0 +4.0.0-to-4.1.0 diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0-alpha-2.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0-alpha-2.mysql.sql index 7aa3d46f2f41..33085004cf81 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0-alpha-2.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0-alpha-2.mysql.sql @@ -892,9 +892,9 @@ CREATE TABLE IF NOT EXISTS `NOTIFICATION_SEQUENCE` PRIMARY KEY (`NNI_ID`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -ALTER TABLE `NOTIFICATION_SEQUENCE` MODIFY COLUMN `NNI_ID` BIGINT(20) GENERATED ALWAYS AS (1) STORED NOT NULL; +ALTER TABLE `NOTIFICATION_SEQUENCE` ADD CONSTRAINT `ONE_ROW_CONSTRAINT` CHECK (`NNI_ID` = 1); -INSERT INTO `NOTIFICATION_SEQUENCE` (`NEXT_EVENT_ID`) SELECT * from (select 1 as `NOTIFICATION_SEQUENCE`) a WHERE (SELECT COUNT(*) FROM `NOTIFICATION_SEQUENCE`) = 0; +INSERT INTO `NOTIFICATION_SEQUENCE` (`NNI_ID`, `NEXT_EVENT_ID`) SELECT 1,1 FROM DUAL WHERE NOT EXISTS (SELECT 1 FROM `NOTIFICATION_SEQUENCE`); CREATE TABLE IF 
NOT EXISTS `KEY_CONSTRAINTS` ( diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0-beta-1.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0-beta-1.mysql.sql index 04a99721fc64..a99b9c5afb4f 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0-beta-1.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0-beta-1.mysql.sql @@ -894,9 +894,9 @@ CREATE TABLE IF NOT EXISTS `NOTIFICATION_SEQUENCE` PRIMARY KEY (`NNI_ID`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -ALTER TABLE `NOTIFICATION_SEQUENCE` MODIFY COLUMN `NNI_ID` BIGINT(20) GENERATED ALWAYS AS (1) STORED NOT NULL; +ALTER TABLE `NOTIFICATION_SEQUENCE` ADD CONSTRAINT `ONE_ROW_CONSTRAINT` CHECK (`NNI_ID` = 1); -INSERT INTO `NOTIFICATION_SEQUENCE` (`NEXT_EVENT_ID`) SELECT * from (select 1 as `NOTIFICATION_SEQUENCE`) a WHERE (SELECT COUNT(*) FROM `NOTIFICATION_SEQUENCE`) = 0; +INSERT INTO `NOTIFICATION_SEQUENCE` (`NNI_ID`, `NEXT_EVENT_ID`) SELECT 1,1 FROM DUAL WHERE NOT EXISTS (SELECT 1 FROM `NOTIFICATION_SEQUENCE`); CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS` ( diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0-beta-2.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql similarity index 99% rename from standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0-beta-2.mysql.sql rename to standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql index d97577f7d211..816a33d21bde 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0-beta-2.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql @@ -191,8 +191,7 @@ CREATE TABLE IF NOT EXISTS `PARTITIONS` ( `TBL_ID` bigint(20) DEFAULT NULL, `WRITE_ID` bigint(20) DEFAULT 0, PRIMARY KEY (`PART_ID`), - UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`), - KEY `PARTITIONS_N49` (`TBL_ID`), + UNIQUE KEY `UNIQUEPARTITION` (`TBL_ID`, `PART_NAME`), KEY `PARTITIONS_N50` (`SD_ID`), CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`), CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) @@ -851,9 +850,9 @@ CREATE TABLE IF NOT EXISTS `NOTIFICATION_SEQUENCE` PRIMARY KEY (`NNI_ID`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1; -ALTER TABLE `NOTIFICATION_SEQUENCE` MODIFY COLUMN `NNI_ID` BIGINT(20) GENERATED ALWAYS AS (1) STORED NOT NULL; +ALTER TABLE `NOTIFICATION_SEQUENCE` ADD CONSTRAINT `ONE_ROW_CONSTRAINT` CHECK (`NNI_ID` = 1); -INSERT INTO `NOTIFICATION_SEQUENCE` (`NEXT_EVENT_ID`) SELECT * from (select 1 as `NOTIFICATION_SEQUENCE`) a WHERE (SELECT COUNT(*) FROM `NOTIFICATION_SEQUENCE`) = 0; +INSERT INTO `NOTIFICATION_SEQUENCE` (`NNI_ID`, `NEXT_EVENT_ID`) SELECT 1,1 FROM DUAL WHERE NOT EXISTS (SELECT 1 FROM `NOTIFICATION_SEQUENCE`); CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS` ( @@ -1344,7 +1343,7 @@ CREATE TABLE IF NOT EXISTS `DC_PRIVS` ( -- ----------------------------------------------------------------- -- Record schema version. 
Should be the last step in the init script -- ----------------------------------------------------------------- -INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0-beta-2', 'Hive release version 4.0.0-beta-2'); +INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0'); /*!40101 SET character_set_client = @saved_cs_client */; /*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.1.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.1.0.mysql.sql new file mode 100644 index 000000000000..9f3b88353329 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/hive-schema-4.1.0.mysql.sql @@ -0,0 +1,1359 @@ +-- MySQL dump 10.13 Distrib 5.5.25, for osx10.6 (i386) +-- +-- Host: localhost Database: test +-- ------------------------------------------------------ +-- Server version 5.5.25 + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; + +-- +-- Table structure for table `BUCKETING_COLS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` ( + `SD_ID` bigint(20) NOT NULL, + `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`SD_ID`,`INTEGER_IDX`), + KEY `BUCKETING_COLS_N49` (`SD_ID`), + CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `CDS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `CDS` ( + `CD_ID` bigint(20) NOT NULL, + PRIMARY KEY (`CD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `COLUMNS_V2` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `COLUMNS_V2` ( + `CD_ID` bigint(20) NOT NULL, + `COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TYPE_NAME` MEDIUMTEXT DEFAULT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`CD_ID`,`COLUMN_NAME`), + KEY `COLUMNS_V2_N49` (`CD_ID`), + CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `DATABASE_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` ( + `DB_ID` bigint(20) NOT NULL, + `PARAM_KEY` 
varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`DB_ID`,`PARAM_KEY`), + KEY `DATABASE_PARAMS_N49` (`DB_ID`), + CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +CREATE TABLE `CTLGS` ( + `CTLG_ID` BIGINT PRIMARY KEY, + `NAME` VARCHAR(256), + `DESC` VARCHAR(4000), + `LOCATION_URI` VARCHAR(4000) NOT NULL, + `CREATE_TIME` INT(11), + UNIQUE KEY `UNIQUE_CATALOG` (`NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- Insert a default value. The location is TBD. Hive will fix this when it starts +INSERT INTO `CTLGS` VALUES (1, 'hive', 'Default catalog for Hive', 'TBD', NULL); + +-- +-- Table structure for table `DBS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `DBS` ( + `DB_ID` bigint(20) NOT NULL, + `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `CTLG_NAME` varchar(256) NOT NULL DEFAULT 'hive', + `CREATE_TIME` INT(11), + `DB_MANAGED_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `TYPE` VARCHAR(32) DEFAULT 'NATIVE' NOT NULL, + `DATACONNECTOR_NAME` VARCHAR(128), + `REMOTE_DBNAME` VARCHAR(128), + PRIMARY KEY (`DB_ID`), + UNIQUE KEY `UNIQUE_DATABASE` (`NAME`, `CTLG_NAME`), + CONSTRAINT `CTLG_FK1` FOREIGN KEY (`CTLG_NAME`) REFERENCES `CTLGS` (`NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `DB_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `DB_PRIVS` ( + `DB_GRANT_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `DB_ID` bigint(20) DEFAULT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`DB_GRANT_ID`), + UNIQUE KEY `DBPRIVILEGEINDEX` (`AUTHORIZER`,`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`), + KEY `DB_PRIVS_N49` (`DB_ID`), + CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `GLOBAL_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` ( + `USER_GRANT_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 
COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`USER_GRANT_ID`), + UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`AUTHORIZER`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `NUCLEUS_TABLES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` ( + `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`CLASS_NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PARTITIONS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PARTITIONS` ( + `PART_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `LAST_ACCESS_TIME` int(11) NOT NULL, + `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `SD_ID` bigint(20) DEFAULT NULL, + `TBL_ID` bigint(20) DEFAULT NULL, + `WRITE_ID` bigint(20) DEFAULT 0, + PRIMARY KEY (`PART_ID`), + UNIQUE KEY `UNIQUEPARTITION` (`TBL_ID`, `PART_NAME`), + KEY `PARTITIONS_N50` (`SD_ID`), + CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`), + CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PARTITION_EVENTS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` ( + `PART_NAME_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `EVENT_TIME` bigint(20) NOT NULL, + `EVENT_TYPE` int(11) NOT NULL, + `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`PART_NAME_ID`), + KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PARTITION_KEYS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` ( + `TBL_ID` bigint(20) NOT NULL, + `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PKEY_NAME` 
varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`TBL_ID`,`PKEY_NAME`), + KEY `PARTITION_KEYS_N49` (`TBL_ID`), + CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PARTITION_KEY_VALS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` ( + `PART_ID` bigint(20) NOT NULL, + `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`PART_ID`,`INTEGER_IDX`), + KEY `PARTITION_KEY_VALS_N49` (`PART_ID`), + CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PARTITION_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` ( + `PART_ID` bigint(20) NOT NULL, + `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`PART_ID`,`PARAM_KEY`), + KEY `PARTITION_PARAMS_N49` (`PART_ID`), + CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PART_COL_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` ( + `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL, + `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `CREATE_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PART_ID` bigint(20) DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`PART_COLUMN_GRANT_ID`), + KEY `PART_COL_PRIVS_N49` (`PART_ID`), + KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`AUTHORIZER`,`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`), + CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `PART_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `PART_PRIVS` ( + `PART_GRANT_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + 
`GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PART_ID` bigint(20) DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`PART_GRANT_ID`), + KEY `PARTPRIVILEGEINDEX` (`AUTHORIZER`,`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`), + KEY `PART_PRIVS_N49` (`PART_ID`), + CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `ROLES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `ROLES` ( + `ROLE_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`ROLE_ID`), + UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `ROLE_MAP` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `ROLE_MAP` ( + `ROLE_GRANT_ID` bigint(20) NOT NULL, + `ADD_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `ROLE_ID` bigint(20) DEFAULT NULL, + PRIMARY KEY (`ROLE_GRANT_ID`), + UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`), + KEY `ROLE_MAP_N49` (`ROLE_ID`), + CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SDS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SDS` ( + `SD_ID` bigint(20) NOT NULL, + `CD_ID` bigint(20) DEFAULT NULL, + `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `IS_COMPRESSED` bit(1) NOT NULL, + `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL, + `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `NUM_BUCKETS` int(11) NOT NULL, + `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `SERDE_ID` bigint(20) DEFAULT NULL, + PRIMARY KEY (`SD_ID`), + KEY `SDS_N49` (`SERDE_ID`), + KEY `SDS_N50` (`CD_ID`), + CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`), + CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SD_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client 
*/; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SD_PARAMS` ( + `SD_ID` bigint(20) NOT NULL, + `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`SD_ID`,`PARAM_KEY`), + KEY `SD_PARAMS_N49` (`SD_ID`), + CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SEQUENCE_TABLE` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` ( + `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `NEXT_VAL` bigint(20) NOT NULL, + PRIMARY KEY (`SEQUENCE_NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +INSERT INTO `SEQUENCE_TABLE` (`SEQUENCE_NAME`, `NEXT_VAL`) VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1); + +-- +-- Table structure for table `SERDES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SERDES` ( + `SERDE_ID` bigint(20) NOT NULL, + `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `DESCRIPTION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `SERIALIZER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `DESERIALIZER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `SERDE_TYPE` integer, + PRIMARY KEY (`SERDE_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SERDE_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` ( + `SERDE_ID` bigint(20) NOT NULL, + `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`), + KEY `SERDE_PARAMS_N49` (`SERDE_ID`), + CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SKEWED_COL_NAMES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` ( + `SD_ID` bigint(20) NOT NULL, + `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`SD_ID`,`INTEGER_IDX`), + KEY `SKEWED_COL_NAMES_N49` (`SD_ID`), + CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SKEWED_COL_VALUE_LOC_MAP` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` ( + `SD_ID` bigint(20) NOT NULL, + `STRING_LIST_ID_KID` bigint(20) NOT NULL, + `LOCATION` varchar(4000) 
CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`), + KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`), + KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`), + CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`), + CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SKEWED_STRING_LIST` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` ( + `STRING_LIST_ID` bigint(20) NOT NULL, + PRIMARY KEY (`STRING_LIST_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SKEWED_STRING_LIST_VALUES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` ( + `STRING_LIST_ID` bigint(20) NOT NULL, + `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`), + KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`), + CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SKEWED_VALUES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` ( + `SD_ID_OID` bigint(20) NOT NULL, + `STRING_LIST_ID_EID` bigint(20) NOT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`), + KEY `SKEWED_VALUES_N50` (`SD_ID_OID`), + KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`), + CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`), + CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `SORT_COLS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `SORT_COLS` ( + `SD_ID` bigint(20) NOT NULL, + `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `ORDER` int(11) NOT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`SD_ID`,`INTEGER_IDX`), + KEY `SORT_COLS_N49` (`SD_ID`), + CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `TABLE_PARAMS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` ( + `TBL_ID` bigint(20) NOT NULL, + `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`TBL_ID`,`PARAM_KEY`), + KEY `TABLE_PARAMS_N49` (`TBL_ID`), + CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN 
KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `MV_CREATION_METADATA` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `MV_CREATION_METADATA` ( + `MV_CREATION_METADATA_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TXN_LIST` TEXT DEFAULT NULL, + `MATERIALIZATION_TIME` bigint(20) NOT NULL, + PRIMARY KEY (`MV_CREATION_METADATA_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +CREATE INDEX MV_UNIQUE_TABLE ON MV_CREATION_METADATA (TBL_NAME, DB_NAME) USING BTREE; + +-- +-- Table structure for table `TBLS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TBLS` ( + `TBL_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `DB_ID` bigint(20) DEFAULT NULL, + `LAST_ACCESS_TIME` int(11) NOT NULL, + `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `RETENTION` int(11) NOT NULL, + `SD_ID` bigint(20) DEFAULT NULL, + `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `VIEW_EXPANDED_TEXT` mediumtext, + `VIEW_ORIGINAL_TEXT` mediumtext, + `IS_REWRITE_ENABLED` bit(1) NOT NULL DEFAULT 0, + `WRITE_ID` bigint(20) DEFAULT 0, + PRIMARY KEY (`TBL_ID`), + UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`), + KEY `TBLS_N50` (`SD_ID`), + KEY `TBLS_N49` (`DB_ID`), + CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`), + CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `MV_TABLES_USED` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `MV_TABLES_USED` ( + `MV_CREATION_METADATA_ID` bigint(20) NOT NULL, + `TBL_ID` bigint(20) NOT NULL, + `INSERTED_COUNT` bigint(20) NOT NULL DEFAULT 0, + `UPDATED_COUNT` bigint(20) NOT NULL DEFAULT 0, + `DELETED_COUNT` bigint(20) NOT NULL DEFAULT 0, + CONSTRAINT `MV_TABLES_USED_PK` PRIMARY KEY (`TBL_ID`, `MV_CREATION_METADATA_ID`), + CONSTRAINT `MV_TABLES_USED_FK1` FOREIGN KEY (`MV_CREATION_METADATA_ID`) REFERENCES `MV_CREATION_METADATA` (`MV_CREATION_METADATA_ID`), + CONSTRAINT `MV_TABLES_USED_FK2` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `TBL_COL_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` ( + `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL, + `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `CREATE_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` 
varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_ID` bigint(20) DEFAULT NULL, + `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`TBL_COLUMN_GRANT_ID`), + KEY `TABLECOLUMNPRIVILEGEINDEX` (`AUTHORIZER`,`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`), + KEY `TBL_COL_PRIVS_N49` (`TBL_ID`), + CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `TBL_PRIVS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TBL_PRIVS` ( + `TBL_GRANT_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TBL_ID` bigint(20) DEFAULT NULL, + `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`TBL_GRANT_ID`), + KEY `TBL_PRIVS_N49` (`TBL_ID`), + KEY `TABLEPRIVILEGEINDEX` (`AUTHORIZER`,`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`), + CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `TAB_COL_STATS` +-- +CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` ( + `CS_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TBL_ID` bigint(20) NOT NULL, + `LONG_LOW_VALUE` bigint(20), + `LONG_HIGH_VALUE` bigint(20), + `DOUBLE_HIGH_VALUE` double(53,4), + `DOUBLE_LOW_VALUE` double(53,4), + `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `NUM_NULLS` bigint(20) NOT NULL, + `NUM_DISTINCTS` bigint(20), + `BIT_VECTOR` blob, + `AVG_COL_LEN` double(53,4), + `MAX_COL_LEN` bigint(20), + `NUM_TRUES` bigint(20), + `NUM_FALSES` bigint(20), + `LAST_ANALYZED` bigint(20) NOT NULL, + `ENGINE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `HISTOGRAM` blob, + PRIMARY KEY (`CS_ID`), + CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (DB_NAME, TABLE_NAME, COLUMN_NAME, CAT_NAME) USING BTREE; +-- +-- Table structure for table `PART_COL_STATS` 
+-- +CREATE TABLE IF NOT EXISTS `PART_COL_STATS` ( + `CS_ID` bigint(20) NOT NULL, + `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `PART_ID` bigint(20) NOT NULL, + `LONG_LOW_VALUE` bigint(20), + `LONG_HIGH_VALUE` bigint(20), + `DOUBLE_HIGH_VALUE` double(53,4), + `DOUBLE_LOW_VALUE` double(53,4), + `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `NUM_NULLS` bigint(20) NOT NULL, + `NUM_DISTINCTS` bigint(20), + `BIT_VECTOR` blob, + `AVG_COL_LEN` double(53,4), + `MAX_COL_LEN` bigint(20), + `NUM_TRUES` bigint(20), + `NUM_FALSES` bigint(20), + `LAST_ANALYZED` bigint(20) NOT NULL, + `ENGINE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `HISTOGRAM` blob, + PRIMARY KEY (`CS_ID`), + CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME,CAT_NAME) USING BTREE; + +-- +-- Table structure for table `TYPES` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TYPES` ( + `TYPES_ID` bigint(20) NOT NULL, + `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`TYPES_ID`), + UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +/*!40101 SET character_set_client = @saved_cs_client */; + +-- +-- Table structure for table `TYPE_FIELDS` +-- + +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` ( + `TYPE_NAME` bigint(20) NOT NULL, + `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL, + `INTEGER_IDX` int(11) NOT NULL, + PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`), + KEY `TYPE_FIELDS_N49` (`TYPE_NAME`), + CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey] +CREATE TABLE IF NOT EXISTS `MASTER_KEYS` +( + `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT, + `MASTER_KEY` VARCHAR(767) BINARY NULL, + PRIMARY KEY (`KEY_ID`) +) ENGINE=INNODB DEFAULT CHARSET=latin1; + +-- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken] +CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS` +( + `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL, + `TOKEN` VARCHAR(767) BINARY NULL, + PRIMARY KEY (`TOKEN_IDENT`) +) ENGINE=INNODB DEFAULT CHARSET=latin1; + +-- +-- Table structure for VERSION +-- +CREATE TABLE IF NOT EXISTS `VERSION` ( + `VER_ID` BIGINT NOT NULL, + 
`SCHEMA_VERSION` VARCHAR(127) NOT NULL, + `VERSION_COMMENT` VARCHAR(255), + PRIMARY KEY (`VER_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- +-- Table structure for table FUNCS +-- +CREATE TABLE IF NOT EXISTS `FUNCS` ( + `FUNC_ID` BIGINT(20) NOT NULL, + `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `CREATE_TIME` INT(11) NOT NULL, + `DB_ID` BIGINT(20), + `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin, + `FUNC_TYPE` INT(11) NOT NULL, + `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin, + `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin, + PRIMARY KEY (`FUNC_ID`), + UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`), + KEY `FUNCS_N49` (`DB_ID`), + CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- +-- Table structure for table FUNC_RU +-- +CREATE TABLE IF NOT EXISTS `FUNC_RU` ( + `FUNC_ID` BIGINT(20) NOT NULL, + `RESOURCE_TYPE` INT(11) NOT NULL, + `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin, + `INTEGER_IDX` INT(11) NOT NULL, + PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`), + CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE IF NOT EXISTS `NOTIFICATION_LOG` +( + `NL_ID` BIGINT(20) NOT NULL, + `EVENT_ID` BIGINT(20) NOT NULL, + `EVENT_TIME` INT(11) NOT NULL, + `EVENT_TYPE` varchar(32) NOT NULL, + `CAT_NAME` varchar(256), + `DB_NAME` varchar(128), + `TBL_NAME` varchar(256), + `MESSAGE` longtext, + `MESSAGE_FORMAT` varchar(16), + PRIMARY KEY (`NL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE UNIQUE INDEX `NOTIFICATION_LOG_EVENT_ID` ON NOTIFICATION_LOG (`EVENT_ID`) USING BTREE; + +CREATE TABLE IF NOT EXISTS `NOTIFICATION_SEQUENCE` +( + `NNI_ID` BIGINT(20) NOT NULL, + `NEXT_EVENT_ID` BIGINT(20) NOT NULL, + PRIMARY KEY (`NNI_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +ALTER TABLE `NOTIFICATION_SEQUENCE` ADD CONSTRAINT `ONE_ROW_CONSTRAINT` CHECK (`NNI_ID` = 1); + +INSERT INTO `NOTIFICATION_SEQUENCE` (`NNI_ID`, `NEXT_EVENT_ID`) SELECT 1,1 FROM DUAL WHERE NOT EXISTS (SELECT 1 FROM `NOTIFICATION_SEQUENCE`); + +CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS` +( + `CHILD_CD_ID` BIGINT, + `CHILD_INTEGER_IDX` INT(11), + `CHILD_TBL_ID` BIGINT, + `PARENT_CD_ID` BIGINT, + `PARENT_INTEGER_IDX` INT(11) NOT NULL, + `PARENT_TBL_ID` BIGINT NOT NULL, + `POSITION` BIGINT NOT NULL, + `CONSTRAINT_NAME` VARCHAR(400) NOT NULL, + `CONSTRAINT_TYPE` SMALLINT(6) NOT NULL, + `UPDATE_RULE` SMALLINT(6), + `DELETE_RULE` SMALLINT(6), + `ENABLE_VALIDATE_RELY` SMALLINT(6) NOT NULL, + `DEFAULT_VALUE` VARCHAR(400), + PRIMARY KEY (`PARENT_TBL_ID`, `CONSTRAINT_NAME`, `POSITION`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX `CONSTRAINTS_PARENT_TABLE_ID_INDEX` ON KEY_CONSTRAINTS (`PARENT_TBL_ID`) USING BTREE; + +CREATE INDEX `CONSTRAINTS_CONSTRAINT_TYPE_INDEX` ON KEY_CONSTRAINTS (`CONSTRAINT_TYPE`) USING BTREE; + +-- ----------------------------- +-- Metastore DB Properties table +-- ----------------------------- +CREATE TABLE IF NOT EXISTS `METASTORE_DB_PROPERTIES` ( + `PROPERTY_KEY` varchar(255) NOT NULL, + `PROPERTY_VALUE` varchar(1000) NOT NULL, + `DESCRIPTION` varchar(1000), + `PROPERTYCONTENT` blob, + PRIMARY KEY(`PROPERTY_KEY`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + + +-- --------------------- +-- Resource plan tables. 
+-- --------------------- +CREATE TABLE IF NOT EXISTS WM_RESOURCEPLAN ( + `RP_ID` bigint(20) NOT NULL, + `NAME` varchar(128) NOT NULL, + `NS` varchar(128), + `QUERY_PARALLELISM` int(11), + `STATUS` varchar(20) NOT NULL, + `DEFAULT_POOL_ID` bigint(20), + PRIMARY KEY (`RP_ID`), + UNIQUE KEY `UNIQUE_WM_RESOURCEPLAN` (`NAME`, `NS`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE IF NOT EXISTS WM_POOL +( + `POOL_ID` bigint(20) NOT NULL, + `RP_ID` bigint(20) NOT NULL, + `PATH` varchar(767) NOT NULL, + `ALLOC_FRACTION` DOUBLE, + `QUERY_PARALLELISM` int(11), + `SCHEDULING_POLICY` varchar(767), + PRIMARY KEY (`POOL_ID`), + UNIQUE KEY `UNIQUE_WM_POOL` (`RP_ID`, `PATH`), + CONSTRAINT `WM_POOL_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +ALTER TABLE `WM_RESOURCEPLAN` ADD CONSTRAINT `WM_RESOURCEPLAN_FK1` FOREIGN KEY (`DEFAULT_POOL_ID`) REFERENCES `WM_POOL`(`POOL_ID`); + +CREATE TABLE IF NOT EXISTS WM_TRIGGER +( + `TRIGGER_ID` bigint(20) NOT NULL, + `RP_ID` bigint(20) NOT NULL, + `NAME` varchar(128) NOT NULL, + `TRIGGER_EXPRESSION` varchar(1024), + `ACTION_EXPRESSION` varchar(1024), + `IS_IN_UNMANAGED` bit(1) NOT NULL DEFAULT 0, + PRIMARY KEY (`TRIGGER_ID`), + UNIQUE KEY `UNIQUE_WM_TRIGGER` (`RP_ID`, `NAME`), + CONSTRAINT `WM_TRIGGER_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE IF NOT EXISTS WM_POOL_TO_TRIGGER +( + `POOL_ID` bigint(20) NOT NULL, + `TRIGGER_ID` bigint(20) NOT NULL, + PRIMARY KEY (`POOL_ID`, `TRIGGER_ID`), + CONSTRAINT `WM_POOL_TO_TRIGGER_FK1` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`), + CONSTRAINT `WM_POOL_TO_TRIGGER_FK2` FOREIGN KEY (`TRIGGER_ID`) REFERENCES `WM_TRIGGER` (`TRIGGER_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE IF NOT EXISTS WM_MAPPING +( + `MAPPING_ID` bigint(20) NOT NULL, + `RP_ID` bigint(20) NOT NULL, + `ENTITY_TYPE` varchar(128) NOT NULL, + `ENTITY_NAME` varchar(128) NOT NULL, + `POOL_ID` bigint(20), + `ORDERING` int, + PRIMARY KEY (`MAPPING_ID`), + UNIQUE KEY `UNIQUE_WM_MAPPING` (`RP_ID`, `ENTITY_TYPE`, `ENTITY_NAME`), + CONSTRAINT `WM_MAPPING_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`), + CONSTRAINT `WM_MAPPING_FK2` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- ---------------------------- +-- Transaction and Lock Tables +-- ---------------------------- +CREATE TABLE TXNS ( + TXN_ID bigint PRIMARY KEY AUTO_INCREMENT, + TXN_STATE char(1) NOT NULL, + TXN_STARTED bigint NOT NULL, + TXN_LAST_HEARTBEAT bigint NOT NULL, + TXN_USER varchar(128) NOT NULL, + TXN_HOST varchar(128) NOT NULL, + TXN_AGENT_INFO varchar(128), + TXN_META_INFO varchar(128), + TXN_HEARTBEAT_COUNT int, + TXN_TYPE int +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +INSERT INTO TXNS (TXN_ID, TXN_STATE, TXN_STARTED, TXN_LAST_HEARTBEAT, TXN_USER, TXN_HOST) + VALUES(0, 'c', 0, 0, '', ''); + +CREATE TABLE TXN_COMPONENTS ( + TC_TXNID bigint NOT NULL, + TC_DATABASE varchar(128) NOT NULL, + TC_TABLE varchar(256), + TC_PARTITION varchar(767), + TC_OPERATION_TYPE char(1) NOT NULL, + TC_WRITEID bigint, + FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); + +CREATE TABLE COMPLETED_TXN_COMPONENTS ( + CTC_TXNID bigint NOT NULL, + CTC_DATABASE varchar(128) NOT NULL, + CTC_TABLE varchar(256), + CTC_PARTITION varchar(767), + CTC_TIMESTAMP timestamp DEFAULT 
CURRENT_TIMESTAMP NOT NULL, + CTC_WRITEID bigint, + CTC_UPDATE_DELETE char(1) NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE; + +CREATE TABLE TXN_LOCK_TBL ( + TXN_LOCK bigint NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO TXN_LOCK_TBL VALUES(1); + +CREATE TABLE HIVE_LOCKS ( + HL_LOCK_EXT_ID bigint NOT NULL, + HL_LOCK_INT_ID bigint NOT NULL, + HL_TXNID bigint NOT NULL, + HL_DB varchar(128) NOT NULL, + HL_TABLE varchar(256), + HL_PARTITION varchar(767), + HL_LOCK_STATE char(1) not null, + HL_LOCK_TYPE char(1) not null, + HL_LAST_HEARTBEAT bigint NOT NULL, + HL_ACQUIRED_AT bigint, + HL_USER varchar(128) NOT NULL, + HL_HOST varchar(128) NOT NULL, + HL_HEARTBEAT_COUNT int, + HL_AGENT_INFO varchar(128), + HL_BLOCKEDBY_EXT_ID bigint, + HL_BLOCKEDBY_INT_ID bigint, + PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID), + KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID); + +CREATE TABLE NEXT_LOCK_ID ( + NL_NEXT bigint NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE COMPACTION_QUEUE ( + CQ_ID bigint PRIMARY KEY, + CQ_DATABASE varchar(128) NOT NULL, + CQ_TABLE varchar(256) NOT NULL, + CQ_PARTITION varchar(767), + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_TBLPROPERTIES varchar(2048), + CQ_WORKER_ID varchar(128), + CQ_ENQUEUE_TIME bigint, + CQ_START bigint, + CQ_RUN_AS varchar(128), + CQ_HIGHEST_WRITE_ID bigint, + CQ_META_INFO varbinary(2048), + CQ_HADOOP_JOB_ID varchar(32), + CQ_ERROR_MESSAGE mediumtext, + CQ_NEXT_TXN_ID bigint, + CQ_TXN_ID bigint, + CQ_COMMIT_TIME bigint, + CQ_INITIATOR_ID varchar(128), + CQ_INITIATOR_VERSION varchar(128), + CQ_WORKER_VERSION varchar(128), + CQ_CLEANER_START bigint, + CQ_RETRY_RETENTION bigint NOT NULL DEFAULT 0, + CQ_POOL_NAME varchar(128), + CQ_NUMBER_OF_BUCKETS integer, + CQ_ORDER_BY varchar(4000) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID bigint PRIMARY KEY, + CC_DATABASE varchar(128) NOT NULL, + CC_TABLE varchar(256) NOT NULL, + CC_PARTITION varchar(767), + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_TBLPROPERTIES varchar(2048), + CC_WORKER_ID varchar(128), + CC_ENQUEUE_TIME bigint, + CC_START bigint, + CC_END bigint, + CC_RUN_AS varchar(128), + CC_HIGHEST_WRITE_ID bigint, + CC_META_INFO varbinary(2048), + CC_HADOOP_JOB_ID varchar(32), + CC_ERROR_MESSAGE mediumtext, + CC_NEXT_TXN_ID bigint, + CC_TXN_ID bigint, + CC_COMMIT_TIME bigint, + CC_INITIATOR_ID varchar(128), + CC_INITIATOR_VERSION varchar(128), + CC_WORKER_VERSION varchar(128), + CC_POOL_NAME varchar(128), + CC_NUMBER_OF_BUCKETS integer, + CC_ORDER_BY varchar(4000) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_TABLE,CC_PARTITION); + +-- HIVE-25842 +CREATE TABLE COMPACTION_METRICS_CACHE ( + CMC_DATABASE varchar(128) NOT NULL, + CMC_TABLE varchar(256) NOT NULL, + CMC_PARTITION varchar(767), + CMC_METRIC_TYPE varchar(128) NOT NULL, + CMC_METRIC_VALUE int NOT NULL, + CMC_VERSION int NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( + NCQ_NEXT bigint NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + +CREATE TABLE AUX_TABLE ( + MT_KEY1 varchar(128) NOT NULL, + MT_KEY2 bigint NOT NULL, + 
MT_COMMENT varchar(255), + PRIMARY KEY(MT_KEY1, MT_KEY2) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE WRITE_SET ( + WS_DATABASE varchar(128) NOT NULL, + WS_TABLE varchar(256) NOT NULL, + WS_PARTITION varchar(767), + WS_TXNID bigint NOT NULL, + WS_COMMIT_ID bigint NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE TXN_TO_WRITE_ID ( + T2W_TXNID bigint NOT NULL, + T2W_DATABASE varchar(128) NOT NULL, + T2W_TABLE varchar(256) NOT NULL, + T2W_WRITEID bigint NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); + +CREATE TABLE NEXT_WRITE_ID ( + NWI_DATABASE varchar(128) NOT NULL, + NWI_TABLE varchar(256) NOT NULL, + NWI_NEXT bigint NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); + +CREATE TABLE MIN_HISTORY_WRITE_ID ( + MH_TXNID bigint NOT NULL, + MH_DATABASE varchar(128) NOT NULL, + MH_TABLE varchar(256) NOT NULL, + MH_WRITEID bigint NOT NULL, + FOREIGN KEY (MH_TXNID) REFERENCES TXNS (TXN_ID) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE MIN_HISTORY_LEVEL ( + MHL_TXNID bigint NOT NULL, + MHL_MIN_OPEN_TXNID bigint NOT NULL, + PRIMARY KEY(MHL_TXNID) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID); + +CREATE TABLE MATERIALIZATION_REBUILD_LOCKS ( + MRL_TXN_ID bigint NOT NULL, + MRL_DB_NAME VARCHAR(128) NOT NULL, + MRL_TBL_NAME VARCHAR(256) NOT NULL, + MRL_LAST_HEARTBEAT bigint NOT NULL, + PRIMARY KEY(MRL_TXN_ID) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE `I_SCHEMA` ( + `SCHEMA_ID` BIGINT PRIMARY KEY, + `SCHEMA_TYPE` INTEGER NOT NULL, + `NAME` VARCHAR(256), + `DB_ID` BIGINT, + `COMPATIBILITY` INTEGER NOT NULL, + `VALIDATION_LEVEL` INTEGER NOT NULL, + `CAN_EVOLVE` bit(1) NOT NULL, + `SCHEMA_GROUP` VARCHAR(256), + `DESCRIPTION` VARCHAR(4000), + FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`), + KEY `UNIQUE_NAME` (`NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE `SCHEMA_VERSION` ( + `SCHEMA_VERSION_ID` bigint primary key, + `SCHEMA_ID` BIGINT, + `VERSION` INTEGER NOT NULL, + `CREATED_AT` BIGINT NOT NULL, + `CD_ID` BIGINT, + `STATE` INTEGER NOT NULL, + `DESCRIPTION` VARCHAR(4000), + `SCHEMA_TEXT` mediumtext, + `FINGERPRINT` VARCHAR(256), + `SCHEMA_VERSION_NAME` VARCHAR(256), + `SERDE_ID` bigint, + FOREIGN KEY (`SCHEMA_ID`) REFERENCES `I_SCHEMA` (`SCHEMA_ID`), + FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`), + FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`), + KEY `UNIQUE_VERSION` (`SCHEMA_ID`, `VERSION`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE REPL_TXN_MAP ( + RTM_REPL_POLICY varchar(256) NOT NULL, + RTM_SRC_TXN_ID bigint NOT NULL, + RTM_TARGET_TXN_ID bigint NOT NULL, + PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + + +CREATE TABLE RUNTIME_STATS ( + RS_ID bigint primary key, + CREATE_TIME bigint NOT NULL, + WEIGHT bigint NOT NULL, + PAYLOAD blob +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME); + +CREATE TABLE TXN_WRITE_NOTIFICATION_LOG ( + WNL_ID bigint NOT NULL, + WNL_TXNID bigint NOT NULL, + WNL_WRITEID bigint NOT NULL, + WNL_DATABASE varchar(128) NOT NULL, + WNL_TABLE varchar(256) NOT NULL, + WNL_PARTITION 
varchar(767) NOT NULL, + WNL_TABLE_OBJ longtext NOT NULL, + WNL_PARTITION_OBJ longtext, + WNL_FILES longtext, + WNL_EVENT_TIME INT(11) NOT NULL, + PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +INSERT INTO `SEQUENCE_TABLE` (`SEQUENCE_NAME`, `NEXT_VAL`) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1); + + +CREATE TABLE SCHEDULED_QUERIES ( + SCHEDULED_QUERY_ID BIGINT NOT NULL, + CLUSTER_NAMESPACE VARCHAR(256), + ENABLED BOOLEAN NOT NULL, + NEXT_EXECUTION INTEGER, + QUERY VARCHAR(4000), + SCHEDULE VARCHAR(256), + SCHEDULE_NAME VARCHAR(256), + `USER` VARCHAR(256), + ACTIVE_EXECUTION_ID INTEGER, + CONSTRAINT SCHEDULED_QUERIES_PK PRIMARY KEY (SCHEDULED_QUERY_ID) +); + +CREATE TABLE SCHEDULED_EXECUTIONS ( + SCHEDULED_EXECUTION_ID BIGINT NOT NULL, + END_TIME INTEGER, + ERROR_MESSAGE VARCHAR(2000), + EXECUTOR_QUERY_ID VARCHAR(256), + LAST_UPDATE_TIME INTEGER, + SCHEDULED_QUERY_ID BIGINT, + START_TIME INTEGER, + STATE VARCHAR(256), + CONSTRAINT SCHEDULED_EXECUTIONS_PK PRIMARY KEY (SCHEDULED_EXECUTION_ID), + CONSTRAINT SCHEDULED_EXECUTIONS_SCHQ_FK FOREIGN KEY (SCHEDULED_QUERY_ID) REFERENCES SCHEDULED_QUERIES(SCHEDULED_QUERY_ID) ON DELETE CASCADE +); + +CREATE INDEX IDX_SCHEDULED_EXECUTIONS_LAST_UPDATE_TIME ON SCHEDULED_EXECUTIONS (LAST_UPDATE_TIME); +CREATE INDEX IDX_SCHEDULED_EXECUTIONS_SCHEDULED_QUERY_ID ON SCHEDULED_EXECUTIONS (SCHEDULED_QUERY_ID); +CREATE UNIQUE INDEX UNIQUE_SCHEDULED_EXECUTIONS_ID ON SCHEDULED_EXECUTIONS (SCHEDULED_EXECUTION_ID); + +-- Create table replication metrics +CREATE TABLE IF NOT EXISTS REPLICATION_METRICS ( + RM_SCHEDULED_EXECUTION_ID bigint NOT NULL, + RM_POLICY varchar(256) NOT NULL, + RM_DUMP_EXECUTION_ID bigint NOT NULL, + RM_METADATA varchar(4000), + RM_PROGRESS varchar(10000), + RM_START_TIME integer NOT NULL, + MESSAGE_FORMAT varchar(16) DEFAULT 'json-0.2', + PRIMARY KEY(RM_SCHEDULED_EXECUTION_ID) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- Create indexes for the replication metrics table +CREATE INDEX POLICY_IDX ON REPLICATION_METRICS (RM_POLICY); +CREATE INDEX DUMP_IDX ON REPLICATION_METRICS (RM_DUMP_EXECUTION_ID); + +-- Create stored procedure tables +CREATE TABLE STORED_PROCS ( + `SP_ID` BIGINT(20) NOT NULL, + `CREATE_TIME` INT(11) NOT NULL, + `DB_ID` BIGINT(20) NOT NULL, + `NAME` VARCHAR(256) NOT NULL, + `OWNER_NAME` VARCHAR(128) NOT NULL, + `SOURCE` LONGTEXT NOT NULL, + PRIMARY KEY (`SP_ID`) +); + +CREATE UNIQUE INDEX UNIQUESTOREDPROC ON STORED_PROCS (NAME, DB_ID); +ALTER TABLE `STORED_PROCS` ADD CONSTRAINT `STOREDPROC_FK1` FOREIGN KEY (`DB_ID`) REFERENCES DBS (`DB_ID`); + +-- Create stored procedure tables +CREATE TABLE PACKAGES ( + `PKG_ID` BIGINT(20) NOT NULL, + `CREATE_TIME` INT(11) NOT NULL, + `DB_ID` BIGINT(20) NOT NULL, + `NAME` VARCHAR(256) NOT NULL, + `OWNER_NAME` VARCHAR(128) NOT NULL, + `HEADER` LONGTEXT NOT NULL, + `BODY` LONGTEXT NOT NULL, + PRIMARY KEY (`PKG_ID`) +); + +CREATE UNIQUE INDEX UNIQUEPKG ON PACKAGES (NAME, DB_ID); +ALTER TABLE `PACKAGES` ADD CONSTRAINT `PACKAGES_FK1` FOREIGN KEY (`DB_ID`) REFERENCES DBS (`DB_ID`); + +-- HIVE-24396 +-- Create DataConnectors and DataConnector_Params tables +CREATE TABLE `DATACONNECTORS` ( + `NAME` VARCHAR(128) NOT NULL, + `TYPE` VARCHAR(32) NOT NULL, + `URL` VARCHAR(4000) NOT NULL, + `COMMENT` VARCHAR(256), + `OWNER_NAME` VARCHAR(256), + `OWNER_TYPE` VARCHAR(10), + `CREATE_TIME` INT(11) NOT NULL, + PRIMARY KEY (`NAME`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE `DATACONNECTOR_PARAMS` ( + 
`NAME` VARCHAR(128) NOT NULL, + `PARAM_KEY` VARCHAR(180) NOT NULL, + `PARAM_VALUE` VARCHAR(4000), + PRIMARY KEY (`NAME`, `PARAM_KEY`), + CONSTRAINT `DATACONNECTOR_NAME_FK1` FOREIGN KEY (`NAME`) REFERENCES `DATACONNECTORS` (`NAME`) ON DELETE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +CREATE TABLE IF NOT EXISTS `DC_PRIVS` ( + `DC_GRANT_ID` bigint(20) NOT NULL, + `CREATE_TIME` int(11) NOT NULL, + `NAME` VARCHAR(128) DEFAULT NULL, + `GRANT_OPTION` smallint(6) NOT NULL, + `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `DC_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL, + PRIMARY KEY (`DC_GRANT_ID`), + UNIQUE KEY `DCPRIVILEGEINDEX` (`AUTHORIZER`,`NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DC_PRIV`,`GRANTOR`,`GRANTOR_TYPE`), + KEY `DC_PRIVS_N49` (`NAME`), + CONSTRAINT `DC_PRIVS_FK1` FOREIGN KEY (`NAME`) REFERENCES `DATACONNECTORS` (`NAME`) + ) ENGINE=InnoDB DEFAULT CHARSET=latin1; + +-- ----------------------------------------------------------------- +-- Record schema version. Should be the last step in the init script +-- ----------------------------------------------------------------- +INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.1.0', 'Hive release version 4.1.0'); + +/*!40101 SET character_set_client = @saved_cs_client */; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +-- Dump completed on 2012-08-23 0:56:31 diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-alpha-1-to-4.0.0-alpha-2.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-alpha-1-to-4.0.0-alpha-2.mysql.sql index e7a9a6f21163..951ac24507a1 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-alpha-1-to-4.0.0-alpha-2.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-alpha-1-to-4.0.0-alpha-2.mysql.sql @@ -5,8 +5,8 @@ ALTER TABLE `COMPLETED_COMPACTIONS` ADD COLUMN `CC_NEXT_TXN_ID` bigint; ALTER TABLE `COMPLETED_COMPACTIONS` ADD COLUMN `CC_TXN_ID` bigint; ALTER TABLE `COMPLETED_COMPACTIONS` ADD COLUMN `CC_COMMIT_TIME` bigint; --- HIVE-26324 -ALTER TABLE `NOTIFICATION_SEQUENCE` MODIFY COLUMN `NNI_ID` INT GENERATED ALWAYS AS (1) STORED NOT NULL; +-- HIVE-27749 +ALTER TABLE `NOTIFICATION_SEQUENCE` ADD CONSTRAINT `ONE_ROW_CONSTRAINT` CHECK (`NNI_ID` = 1); -- HIVE-26443 ALTER TABLE `COMPACTION_QUEUE` ADD COLUMN `CQ_POOL_NAME` VARCHAR(128); diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.mysql.sql index fee7042115fa..0d4b22747610 100644 --- 
a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.mysql.sql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.mysql.sql @@ -32,18 +32,22 @@ ALTER TABLE METASTORE_DB_PROPERTIES ADD PROPERTYCONTENT blob; -- HIVE-27457 UPDATE SDS - SET SDS.INPUT_FORMAT = "org.apache.hadoop.hive.kudu.KuduInputFormat", - SDS.OUTPUT_FORMAT = "org.apache.hadoop.hive.kudu.KuduOutputFormat" - WHERE SDS.SD_ID IN ( - SELECT TBL_ID FROM TABLE_PARAMS WHERE PARAM_VALUE LIKE '%KuduStorageHandler%' + SET INPUT_FORMAT = 'org.apache.hadoop.hive.kudu.KuduInputFormat', OUTPUT_FORMAT = 'org.apache.hadoop.hive.kudu.KuduOutputFormat' + WHERE SD_ID IN ( + SELECT TBLS.SD_ID + FROM TBLS + INNER JOIN TABLE_PARAMS ON TBLS.TBL_ID = TABLE_PARAMS.TBL_ID + WHERE PARAM_VALUE LIKE '%KuduStorageHandler%' ); + UPDATE SERDES SET SERDES.SLIB = "org.apache.hadoop.hive.kudu.KuduSerDe" WHERE SERDE_ID IN ( SELECT SDS.SERDE_ID FROM TBLS - LEFT JOIN SDS ON TBLS.SD_ID = SDS.SD_ID - WHERE TBL_ID IN (SELECT TBL_ID FROM TABLE_PARAMS WHERE PARAM_VALUE LIKE '%KuduStorageHandler%') + INNER JOIN SDS ON TBLS.SD_ID = SDS.SD_ID + INNER JOIN TABLE_PARAMS ON TBLS.TBL_ID = TABLE_PARAMS.TBL_ID + WHERE TABLE_PARAMS.PARAM_VALUE LIKE '%KuduStorageHandler%' ); -- These lines need to be last. Insert any changes above. diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.mysql.sql deleted file mode 100644 index 9d3bcb028abe..000000000000 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.mysql.sql +++ /dev/null @@ -1,9 +0,0 @@ -SELECT 'Upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0-beta-2' AS MESSAGE; - --- HIVE-24815: Remove "IDXS" Table from Metastore Schema -DROP TABLE `INDEX_PARAMS`; -DROP TABLE `IDXS`; - --- These lines need to be last. Insert any changes above. -UPDATE VERSION SET SCHEMA_VERSION='4.0.0-beta-2', VERSION_COMMENT='Hive release version 4.0.0-beta-2' where VER_ID=1; -SELECT 'Finished upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0-beta-2' AS MESSAGE; diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-beta-1-to-4.0.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-beta-1-to-4.0.0.mysql.sql new file mode 100644 index 000000000000..dd4a76033e4a --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-beta-1-to-4.0.0.mysql.sql @@ -0,0 +1,14 @@ +SELECT 'Upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0' AS MESSAGE; + +-- HIVE-24815: Remove "IDXS" Table from Metastore Schema +DROP TABLE `INDEX_PARAMS`; +DROP TABLE `IDXS`; + +-- HIVE-27827 +DROP INDEX UNIQUEPARTITION ON PARTITIONS; +CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (TBL_ID, PART_NAME); +DROP INDEX PARTITIONS_N49 on PARTITIONS; + +-- These lines need to be last. Insert any changes above. 
+UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0' AS MESSAGE; diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-to-4.1.0.mysql.sql b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-to-4.1.0.mysql.sql new file mode 100644 index 000000000000..ffa92601d79a --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade-4.0.0-to-4.1.0.mysql.sql @@ -0,0 +1,5 @@ +SELECT 'Upgrading MetaStore schema from 4.0.0 to 4.1.0' AS MESSAGE; + +-- These lines need to be last. Insert any changes above. +UPDATE VERSION SET SCHEMA_VERSION='4.1.0', VERSION_COMMENT='Hive release version 4.1.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 4.0.0 to 4.1.0' AS MESSAGE; diff --git a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade.order.mysql b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade.order.mysql index 809b719e7324..58500aa07e6c 100644 --- a/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade.order.mysql +++ b/standalone-metastore/metastore-server/src/main/sql/mysql/upgrade.order.mysql @@ -19,4 +19,5 @@ 3.2.0-to-4.0.0-alpha-1 4.0.0-alpha-1-to-4.0.0-alpha-2 4.0.0-alpha-2-to-4.0.0-beta-1 -4.0.0-beta-1-to-4.0.0-beta-2 +4.0.0-beta-1-to-4.0.0 +4.0.0-to-4.1.0 diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0-beta-2.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql similarity index 99% rename from standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0-beta-2.oracle.sql rename to standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql index a2fb02c89111..07fd214c4b62 100644 --- a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0-beta-2.oracle.sql +++ b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql @@ -763,9 +763,7 @@ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFEREN CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID); -CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID); - -CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID); +CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (TBL_ID, PART_NAME); -- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege] ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; @@ -1321,4 +1319,4 @@ CREATE INDEX DC_PRIVS_N49 ON DC_PRIVS (NAME); -- ----------------------------------------------------------------- -- Record schema version. 
Should be the last step in the init script -- ----------------------------------------------------------------- -INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0-beta-2', 'Hive release version 4.0.0-beta-2'); +INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0'); diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.1.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.1.0.oracle.sql new file mode 100644 index 000000000000..5623e23092db --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/sql/oracle/hive-schema-4.1.0.oracle.sql @@ -0,0 +1,1322 @@ +-- Table SEQUENCE_TABLE is an internal table required by DataNucleus. +-- NOTE: Some versions of SchemaTool do not automatically generate this table. +-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416 + +-- HIVE-21336 safeguards from failures from indices being too long +ALTER SESSION SET NLS_LENGTH_SEMANTICS=BYTE; + +CREATE TABLE SEQUENCE_TABLE +( + SEQUENCE_NAME VARCHAR2(255) NOT NULL, + NEXT_VAL NUMBER NOT NULL +); + +ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME); + +INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1); + +-- Table NUCLEUS_TABLES is an internal table required by DataNucleus. +-- This table is required if datanucleus.autoStartMechanism=SchemaTable +-- NOTE: Some versions of SchemaTool do not automatically generate this table. +-- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416 +CREATE TABLE NUCLEUS_TABLES +( + CLASS_NAME VARCHAR2(128) NOT NULL, + TABLE_NAME VARCHAR2(128) NOT NULL, + TYPE VARCHAR2(4) NOT NULL, + OWNER VARCHAR2(2) NOT NULL, + VERSION VARCHAR2(20) NOT NULL, + INTERFACE_NAME VARCHAR2(255) NULL +); + +ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME); + +-- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege] +CREATE TABLE PART_COL_PRIVS +( + PART_COLUMN_GRANT_ID NUMBER NOT NULL, + "COLUMN_NAME" VARCHAR2(767) NULL, + CREATE_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PART_ID NUMBER NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + PART_COL_PRIV VARCHAR2(128) NULL, + AUTHORIZER VARCHAR2(128) NULL +); + +ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID); + +-- Table CDS. 
+CREATE TABLE CDS +( + CD_ID NUMBER NOT NULL +); + +ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID); + +-- Table COLUMNS_V2 for join relationship +CREATE TABLE COLUMNS_V2 +( + CD_ID NUMBER NOT NULL, + "COMMENT" VARCHAR2(4000) NULL, + "COLUMN_NAME" VARCHAR2(767) NOT NULL, + TYPE_NAME CLOB NOT NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME"); + +-- Table PARTITION_KEY_VALS for join relationship +CREATE TABLE PARTITION_KEY_VALS +( + PART_ID NUMBER NOT NULL, + PART_KEY_VAL VARCHAR2(256) NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX); + +CREATE TABLE CTLGS ( + CTLG_ID NUMBER PRIMARY KEY, + "NAME" VARCHAR2(256), + "DESC" VARCHAR2(4000), + LOCATION_URI VARCHAR2(4000) NOT NULL, + CREATE_TIME NUMBER (10), + UNIQUE ("NAME") +); + +-- Insert a default value. The location is TBD. Hive will fix this when it starts +INSERT INTO CTLGS VALUES (1, 'hive', 'Default catalog for Hive', 'TBD', NULL); + +-- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase] +CREATE TABLE DBS +( + DB_ID NUMBER NOT NULL, + "DESC" VARCHAR2(4000) NULL, + DB_LOCATION_URI VARCHAR2(4000) NOT NULL, + "NAME" VARCHAR2(128) NULL, + OWNER_NAME VARCHAR2(128) NULL, + OWNER_TYPE VARCHAR2(10) NULL, + CTLG_NAME VARCHAR2(256) DEFAULT 'hive' NOT NULL, + CREATE_TIME NUMBER (10), + DB_MANAGED_LOCATION_URI VARCHAR2(4000) NULL, + TYPE VARCHAR2(32) DEFAULT 'NATIVE' NOT NULL, + DATACONNECTOR_NAME VARCHAR2(128) NULL, + REMOTE_DBNAME VARCHAR2(128) NULL +); + +ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID); + +CREATE INDEX CTLG_NAME_DBS ON DBS(CTLG_NAME); + +-- Table PARTITION_PARAMS for join relationship +CREATE TABLE PARTITION_PARAMS +( + PART_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(256) NOT NULL, + PARAM_VALUE CLOB NULL +); + +ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY); + +-- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo] +CREATE TABLE SERDES +( + SERDE_ID NUMBER NOT NULL, + "NAME" VARCHAR2(128) NULL, + SLIB VARCHAR2(4000) NULL, + "DESCRIPTION" VARCHAR2(4000), + "SERIALIZER_CLASS" VARCHAR2(4000), + "DESERIALIZER_CLASS" VARCHAR2(4000), + "SERDE_TYPE" NUMBER +); + +ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID); + +-- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType] +CREATE TABLE TYPES +( + TYPES_ID NUMBER NOT NULL, + TYPE_NAME VARCHAR2(128) NULL, + TYPE1 VARCHAR2(767) NULL, + TYPE2 VARCHAR2(767) NULL +); + +ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID); + +-- Table PARTITION_KEYS for join relationship +CREATE TABLE PARTITION_KEYS +( + TBL_ID NUMBER NOT NULL, + PKEY_COMMENT VARCHAR2(4000) NULL, + PKEY_NAME VARCHAR2(128) NOT NULL, + PKEY_TYPE VARCHAR2(767) NOT NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME); + +-- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole] +CREATE TABLE ROLES +( + ROLE_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + OWNER_NAME VARCHAR2(128) NULL, + ROLE_NAME VARCHAR2(128) NULL +); + +ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID); + +-- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition] +CREATE TABLE PARTITIONS +( + PART_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + LAST_ACCESS_TIME NUMBER 
(10) NOT NULL, + PART_NAME VARCHAR2(767) NULL, + SD_ID NUMBER NULL, + TBL_ID NUMBER NULL, + WRITE_ID NUMBER DEFAULT 0 NOT NULL +); + +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID); + +-- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege] +CREATE TABLE TBL_COL_PRIVS +( + TBL_COLUMN_GRANT_ID NUMBER NOT NULL, + "COLUMN_NAME" VARCHAR2(767) NULL, + CREATE_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + TBL_COL_PRIV VARCHAR2(128) NULL, + TBL_ID NUMBER NULL, + AUTHORIZER VARCHAR2(128) NULL +); + +ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID); + +-- Table BUCKETING_COLS for join relationship +CREATE TABLE BUCKETING_COLS +( + SD_ID NUMBER NOT NULL, + BUCKET_COL_NAME VARCHAR2(256) NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +-- Table TYPE_FIELDS for join relationship +CREATE TABLE TYPE_FIELDS +( + TYPE_NAME NUMBER NOT NULL, + "COMMENT" VARCHAR2(256) NULL, + FIELD_NAME VARCHAR2(128) NOT NULL, + FIELD_TYPE VARCHAR2(767) NOT NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME); + +-- Table SD_PARAMS for join relationship +CREATE TABLE SD_PARAMS +( + SD_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(256) NOT NULL, + PARAM_VALUE CLOB NULL +); + +ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY); + +-- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege] +CREATE TABLE GLOBAL_PRIVS +( + USER_GRANT_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + USER_PRIV VARCHAR2(128) NULL, + AUTHORIZER VARCHAR2(128) NULL +); + +ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID); + +-- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor] +CREATE TABLE SDS +( + SD_ID NUMBER NOT NULL, + CD_ID NUMBER NULL, + INPUT_FORMAT VARCHAR2(4000) NULL, + IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)), + LOCATION VARCHAR2(4000) NULL, + NUM_BUCKETS NUMBER (10) NOT NULL, + OUTPUT_FORMAT VARCHAR2(4000) NULL, + SERDE_ID NUMBER NULL, + IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0)) +); + +ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID); + +-- Table TABLE_PARAMS for join relationship +CREATE TABLE TABLE_PARAMS +( + TBL_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(256) NOT NULL, + PARAM_VALUE CLOB NULL +); + +ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY); + +-- Table SORT_COLS for join relationship +CREATE TABLE SORT_COLS +( + SD_ID NUMBER NOT NULL, + "COLUMN_NAME" VARCHAR2(767) NULL, + "ORDER" NUMBER (10) NOT NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +-- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege] +CREATE TABLE TBL_PRIVS +( + TBL_GRANT_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + 
GRANTOR_TYPE VARCHAR2(128) NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + TBL_PRIV VARCHAR2(128) NULL, + TBL_ID NUMBER NULL, + AUTHORIZER VARCHAR2(128) NULL +); + +ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID); + +-- Table DATABASE_PARAMS for join relationship +CREATE TABLE DATABASE_PARAMS +( + DB_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(180) NOT NULL, + PARAM_VALUE VARCHAR2(4000) NULL +); + +ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY); + +-- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap] +CREATE TABLE ROLE_MAP +( + ROLE_GRANT_ID NUMBER NOT NULL, + ADD_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + ROLE_ID NUMBER NULL +); + +ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID); + +-- Table SERDE_PARAMS for join relationship +CREATE TABLE SERDE_PARAMS +( + SERDE_ID NUMBER NOT NULL, + PARAM_KEY VARCHAR2(256) NOT NULL, + PARAM_VALUE CLOB NULL +); + +ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY); + +-- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege] +CREATE TABLE PART_PRIVS +( + PART_GRANT_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PART_ID NUMBER NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + PART_PRIV VARCHAR2(128) NULL, + AUTHORIZER VARCHAR2(128) NULL +); + +ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID); + +-- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege] +CREATE TABLE DB_PRIVS +( + DB_GRANT_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + DB_ID NUMBER NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + DB_PRIV VARCHAR2(128) NULL, + AUTHORIZER VARCHAR2(128) NULL +); + +ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID); + +-- Table DC_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDCPrivilege] +CREATE TABLE DC_PRIVS +( + DC_GRANT_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + NAME VARCHAR2(128) NULL, + GRANT_OPTION NUMBER (5) NOT NULL, + GRANTOR VARCHAR2(128) NULL, + GRANTOR_TYPE VARCHAR2(128) NULL, + PRINCIPAL_NAME VARCHAR2(128) NULL, + PRINCIPAL_TYPE VARCHAR2(128) NULL, + DC_PRIV VARCHAR2(128) NULL, + AUTHORIZER VARCHAR2(128) NULL +); + +ALTER TABLE DC_PRIVS ADD CONSTRAINT DC_PRIVS_PK PRIMARY KEY (DC_GRANT_ID); + + +-- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable] +CREATE TABLE TBLS +( + TBL_ID NUMBER NOT NULL, + CREATE_TIME NUMBER (10) NOT NULL, + DB_ID NUMBER NULL, + LAST_ACCESS_TIME NUMBER (10) NOT NULL, + OWNER VARCHAR2(767) NULL, + OWNER_TYPE VARCHAR2(10) NULL, + RETENTION NUMBER (10) NOT NULL, + SD_ID NUMBER NULL, + TBL_NAME VARCHAR2(256) NULL, + TBL_TYPE VARCHAR2(128) NULL, + VIEW_EXPANDED_TEXT CLOB NULL, + VIEW_ORIGINAL_TEXT CLOB NULL, + IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0)), + WRITE_ID NUMBER DEFAULT 0 NOT NULL +); + +ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID); + +-- Table MV_CREATION_METADATA 
for classes [org.apache.hadoop.hive.metastore.model.MCreationMetadata] +CREATE TABLE MV_CREATION_METADATA +( + MV_CREATION_METADATA_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NOT NULL, + DB_NAME VARCHAR2(128) NOT NULL, + TBL_NAME VARCHAR2(256) NOT NULL, + TXN_LIST CLOB NULL, + MATERIALIZATION_TIME NUMBER NOT NULL +); + +ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID); + +CREATE UNIQUE INDEX UNIQUE_TABLE ON MV_CREATION_METADATA ("DB_NAME", "TBL_NAME"); + +-- Table MV_CREATION_METADATA for classes [org.apache.hadoop.hive.metastore.model.MCreationMetadata] +CREATE TABLE MV_TABLES_USED +( + MV_CREATION_METADATA_ID NUMBER NOT NULL, + TBL_ID NUMBER NOT NULL, + INSERTED_COUNT NUMBER DEFAULT 0 NOT NULL, + UPDATED_COUNT NUMBER DEFAULT 0 NOT NULL, + DELETED_COUNT NUMBER DEFAULT 0 NOT NULL +); + +ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_PK PRIMARY KEY (TBL_ID, MV_CREATION_METADATA_ID); + +-- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent] +CREATE TABLE PARTITION_EVENTS +( + PART_NAME_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NULL, + DB_NAME VARCHAR2(128) NULL, + EVENT_TIME NUMBER NOT NULL, + EVENT_TYPE NUMBER (10) NOT NULL, + PARTITION_NAME VARCHAR2(767) NULL, + TBL_NAME VARCHAR2(256) NULL +); + +ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID); + +-- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList] +CREATE TABLE SKEWED_STRING_LIST +( + STRING_LIST_ID NUMBER NOT NULL +); + +ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID); + +CREATE TABLE SKEWED_STRING_LIST_VALUES +( + STRING_LIST_ID NUMBER NOT NULL, + "STRING_LIST_VALUE" VARCHAR2(256) NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX); + +ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ; + +CREATE TABLE SKEWED_COL_NAMES +( + SD_ID NUMBER NOT NULL, + "SKEWED_COL_NAME" VARCHAR2(256) NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX); + +ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE TABLE SKEWED_COL_VALUE_LOC_MAP +( + SD_ID NUMBER NOT NULL, + STRING_LIST_ID_KID NUMBER NOT NULL, + "LOCATION" VARCHAR2(4000) NULL +); + +CREATE TABLE MASTER_KEYS +( + KEY_ID NUMBER (10) NOT NULL, + MASTER_KEY VARCHAR2(767) NULL +); + +CREATE TABLE DELEGATION_TOKENS +( + TOKEN_IDENT VARCHAR2(767) NOT NULL, + TOKEN VARCHAR2(767) NULL +); + +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID); + +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ; + +ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE TABLE SKEWED_VALUES +( + SD_ID_OID NUMBER NOT NULL, + STRING_LIST_ID_EID NUMBER NOT NULL, + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY 
(SD_ID_OID,INTEGER_IDX); + +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ; + +ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +ALTER TABLE DBS ADD CONSTRAINT CTLGS_FK FOREIGN KEY (CTLG_NAME) REFERENCES CTLGS ("NAME") INITIALLY DEFERRED; + +-- column statistics + +CREATE TABLE TAB_COL_STATS ( + CS_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NOT NULL, + DB_NAME VARCHAR2(128) NOT NULL, + TABLE_NAME VARCHAR2(256) NOT NULL, + COLUMN_NAME VARCHAR2(767) NOT NULL, + COLUMN_TYPE VARCHAR2(128) NOT NULL, + TBL_ID NUMBER NOT NULL, + LONG_LOW_VALUE NUMBER, + LONG_HIGH_VALUE NUMBER, + DOUBLE_LOW_VALUE NUMBER, + DOUBLE_HIGH_VALUE NUMBER, + BIG_DECIMAL_LOW_VALUE VARCHAR2(4000), + BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000), + NUM_NULLS NUMBER NOT NULL, + NUM_DISTINCTS NUMBER, + BIT_VECTOR BLOB, + AVG_COL_LEN NUMBER, + MAX_COL_LEN NUMBER, + NUM_TRUES NUMBER, + NUM_FALSES NUMBER, + LAST_ANALYZED NUMBER NOT NULL, + ENGINE VARCHAR2(128) NOT NULL, + HISTOGRAM BLOB +); + +ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID); + +ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID); + +CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (DB_NAME, TABLE_NAME, COLUMN_NAME, CAT_NAME); + +CREATE TABLE VERSION ( + VER_ID NUMBER NOT NULL, + SCHEMA_VERSION VARCHAR(127) NOT NULL, + VERSION_COMMENT VARCHAR(255) +); +ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID); + +CREATE TABLE PART_COL_STATS ( + CS_ID NUMBER NOT NULL, + CAT_NAME VARCHAR2(256) NOT NULL, + DB_NAME VARCHAR2(128) NOT NULL, + TABLE_NAME VARCHAR2(256) NOT NULL, + PARTITION_NAME VARCHAR2(767) NOT NULL, + COLUMN_NAME VARCHAR2(767) NOT NULL, + COLUMN_TYPE VARCHAR2(128) NOT NULL, + PART_ID NUMBER NOT NULL, + LONG_LOW_VALUE NUMBER, + LONG_HIGH_VALUE NUMBER, + DOUBLE_LOW_VALUE NUMBER, + DOUBLE_HIGH_VALUE NUMBER, + BIG_DECIMAL_LOW_VALUE VARCHAR2(4000), + BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000), + NUM_NULLS NUMBER NOT NULL, + NUM_DISTINCTS NUMBER, + BIT_VECTOR BLOB, + AVG_COL_LEN NUMBER, + MAX_COL_LEN NUMBER, + NUM_TRUES NUMBER, + NUM_FALSES NUMBER, + LAST_ANALYZED NUMBER NOT NULL, + ENGINE VARCHAR2(128) NOT NULL, + HISTOGRAM BLOB +); + +ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID); + +ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED; + +CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID); + +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME,CAT_NAME); + +CREATE TABLE FUNCS ( + FUNC_ID NUMBER NOT NULL, + CLASS_NAME VARCHAR2(4000), + CREATE_TIME NUMBER(10) NOT NULL, + DB_ID NUMBER, + FUNC_NAME VARCHAR2(128), + FUNC_TYPE NUMBER(10) NOT NULL, + OWNER_NAME VARCHAR2(128), + OWNER_TYPE VARCHAR2(10) +); + +ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID); + +CREATE TABLE FUNC_RU ( + FUNC_ID NUMBER NOT NULL, + RESOURCE_TYPE NUMBER(10) NOT NULL, + RESOURCE_URI VARCHAR2(4000), + INTEGER_IDX NUMBER(10) NOT NULL +); + +ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX); + +CREATE TABLE NOTIFICATION_LOG +( + NL_ID NUMBER NOT NULL, + EVENT_ID NUMBER NOT NULL, + EVENT_TIME NUMBER(10) NOT NULL, + EVENT_TYPE 
VARCHAR2(32) NOT NULL, + CAT_NAME VARCHAR2(256), + DB_NAME VARCHAR2(128), + TBL_NAME VARCHAR2(256), + MESSAGE CLOB NULL, + MESSAGE_FORMAT VARCHAR(16) NULL +); + +ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID); + +CREATE UNIQUE INDEX NOTIFICATION_LOG_EVENT_ID ON NOTIFICATION_LOG(EVENT_ID); + +CREATE TABLE NOTIFICATION_SEQUENCE +( + NNI_ID NUMBER NOT NULL, + NEXT_EVENT_ID NUMBER NOT NULL +); + +ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID); + +ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT ONE_ROW_CONSTRAINT CHECK (NNI_ID = 1); + +INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 FROM DUAL WHERE NOT EXISTS ( SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE); + +-- Tables to manage resource plans. + +CREATE TABLE WM_RESOURCEPLAN +( + RP_ID NUMBER NOT NULL, + "NAME" VARCHAR2(128) NOT NULL, + NS VARCHAR2(128), + QUERY_PARALLELISM NUMBER(10), + STATUS VARCHAR2(20) NOT NULL, + DEFAULT_POOL_ID NUMBER +); + +ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID); + +CREATE TABLE WM_POOL +( + POOL_ID NUMBER NOT NULL, + RP_ID NUMBER NOT NULL, + PATH VARCHAR2(1024) NOT NULL, + ALLOC_FRACTION NUMBER, + QUERY_PARALLELISM NUMBER(10), + SCHEDULING_POLICY VARCHAR2(1024) +); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID); + +CREATE TABLE WM_TRIGGER +( + TRIGGER_ID NUMBER NOT NULL, + RP_ID NUMBER NOT NULL, + "NAME" VARCHAR2(128) NOT NULL, + TRIGGER_EXPRESSION VARCHAR2(1024), + ACTION_EXPRESSION VARCHAR2(1024), + IS_IN_UNMANAGED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_IN_UNMANAGED IN (1,0)) +); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID); + +CREATE TABLE WM_POOL_TO_TRIGGER +( + POOL_ID NUMBER NOT NULL, + TRIGGER_ID NUMBER NOT NULL +); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID); + +CREATE TABLE WM_MAPPING +( + MAPPING_ID NUMBER NOT NULL, + RP_ID NUMBER NOT NULL, + ENTITY_TYPE VARCHAR2(128) NOT NULL, + ENTITY_NAME VARCHAR2(128) NOT NULL, + POOL_ID NUMBER NOT NULL, + ORDERING NUMBER(10) +); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID); + +-- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege] +ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ; + +CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID); + +CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (AUTHORIZER,PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table COLUMNS_V2 +ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ; + +CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID); + + +-- Constraints for table PARTITION_KEY_VALS +ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ; + +CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID); + + +-- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase] +CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME", CTLG_NAME); + + +-- Constraints for table PARTITION_PARAMS +ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED 
; + +CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID); + + +-- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo] + +-- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType] +CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME); + + +-- Constraints for table PARTITION_KEYS +ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID); + + +-- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole] +CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME); + + +-- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition] +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID); + +CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (TBL_ID, PART_NAME); + +-- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege] +ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (AUTHORIZER,TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID); + +-- Constraints for table BUCKETING_COLS +ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID); + + +-- Constraints for table TYPE_FIELDS +ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ; + +CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME); + + +-- Constraints for table SD_PARAMS +ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID); + + +-- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege] +CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (AUTHORIZER,PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor] +ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ; +ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ; + +CREATE INDEX SDS_N49 ON SDS (SERDE_ID); +CREATE INDEX SDS_N50 ON SDS (CD_ID); + + +-- Constraints for table TABLE_PARAMS +ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID); + + +-- Constraints for table SORT_COLS +ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID); + + +-- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege] +ALTER TABLE TBL_PRIVS ADD 
CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ; + +CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID); + +CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (AUTHORIZER,TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table DATABASE_PARAMS +ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ; + +CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID); + + +-- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap] +ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ; + +CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID); + +CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE); + + +-- Constraints for table SERDE_PARAMS +ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ; + +CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID); + + +-- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege] +ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ; + +CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (AUTHORIZER,PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID); + + +-- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege] +ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ; + +CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (AUTHORIZER,DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE); + +CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID); + + +-- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable] +ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ; + +ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ; + +CREATE INDEX TBLS_N49 ON TBLS (DB_ID); + +CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID); + +CREATE INDEX TBLS_N50 ON TBLS (SD_ID); + + +-- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent] +CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME); + + +-- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions] +ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED; + +CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID); + +CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID); + + +-- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions] +ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED; + +CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID); + +CREATE TABLE KEY_CONSTRAINTS +( + CHILD_CD_ID NUMBER, + CHILD_INTEGER_IDX NUMBER, + CHILD_TBL_ID NUMBER, + PARENT_CD_ID NUMBER, + PARENT_INTEGER_IDX NUMBER NOT NULL, + PARENT_TBL_ID NUMBER NOT NULL, + POSITION NUMBER NOT NULL, + CONSTRAINT_NAME VARCHAR(400) NOT NULL, + CONSTRAINT_TYPE NUMBER NOT NULL, + UPDATE_RULE NUMBER, + DELETE_RULE NUMBER, + 
ENABLE_VALIDATE_RELY NUMBER NOT NULL, + DEFAULT_VALUE VARCHAR(400) +) ; + +ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (PARENT_TBL_ID, CONSTRAINT_NAME, POSITION); + +CREATE INDEX CONSTRAINTS_PT_INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID); + +CREATE INDEX CONSTRAINTS_CT_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE); + +-- Table for METASTORE_DB_PROPERTIES and its constraints +CREATE TABLE METASTORE_DB_PROPERTIES +( + PROPERTY_KEY VARCHAR(255) NOT NULL, + PROPERTY_VALUE VARCHAR(1000) NOT NULL, + DESCRIPTION VARCHAR(1000), + PROPERTYCONTENT BLOB +); + +ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY); + +-- Constraints for resource plan tables. + +CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN (NS, "NAME"); + +CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH); + +ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID); + +ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME"); + +ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); + +ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID); + +CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID); + +ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID); + +ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK1 FOREIGN KEY (MV_CREATION_METADATA_ID) REFERENCES MV_CREATION_METADATA (MV_CREATION_METADATA_ID); + +ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK2 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID); + +------------------------------ +-- Transaction and lock tables +------------------------------ +CREATE TABLE TXNS ( + TXN_ID NUMBER(19) GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, + TXN_STATE char(1) NOT NULL, + TXN_STARTED NUMBER(19) NOT NULL, + TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL, + TXN_USER varchar(128) NOT NULL, + TXN_HOST varchar(128) NOT NULL, + TXN_AGENT_INFO varchar2(128), + TXN_META_INFO varchar2(128), + TXN_HEARTBEAT_COUNT number(10), + TXN_TYPE number(10) +) ROWDEPENDENCIES; + +INSERT INTO TXNS (TXN_ID, TXN_STATE, TXN_STARTED, TXN_LAST_HEARTBEAT, TXN_USER, TXN_HOST) + VALUES(0, 'c', 0, 0, '_', '_'); + +CREATE TABLE TXN_COMPONENTS ( + TC_TXNID NUMBER(19) NOT NULL REFERENCES TXNS (TXN_ID), + TC_DATABASE VARCHAR2(128) NOT NULL, + TC_TABLE VARCHAR2(256), + TC_PARTITION VARCHAR2(767) NULL, + TC_OPERATION_TYPE char(1) NOT NULL, + TC_WRITEID NUMBER(19) +) ROWDEPENDENCIES; + +CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID); + +CREATE TABLE COMPLETED_TXN_COMPONENTS ( + CTC_TXNID NUMBER(19) NOT NULL, + CTC_DATABASE VARCHAR2(128) NOT NULL, + CTC_TABLE VARCHAR2(256), + CTC_PARTITION VARCHAR2(767), + CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL, + CTC_WRITEID NUMBER(19), + CTC_UPDATE_DELETE CHAR(1) NOT NULL +) ROWDEPENDENCIES; + +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION); + +CREATE TABLE TXN_LOCK_TBL ( + TXN_LOCK 
NUMBER(19) NOT NULL +); +INSERT INTO TXN_LOCK_TBL VALUES(1); + +CREATE TABLE HIVE_LOCKS ( + HL_LOCK_EXT_ID NUMBER(19) NOT NULL, + HL_LOCK_INT_ID NUMBER(19) NOT NULL, + HL_TXNID NUMBER(19) NOT NULL, + HL_DB VARCHAR2(128) NOT NULL, + HL_TABLE VARCHAR2(256), + HL_PARTITION VARCHAR2(767), + HL_LOCK_STATE CHAR(1) NOT NULL, + HL_LOCK_TYPE CHAR(1) NOT NULL, + HL_LAST_HEARTBEAT NUMBER(19) NOT NULL, + HL_ACQUIRED_AT NUMBER(19), + HL_USER varchar(128) NOT NULL, + HL_HOST varchar(128) NOT NULL, + HL_HEARTBEAT_COUNT number(10), + HL_AGENT_INFO varchar2(128), + HL_BLOCKEDBY_EXT_ID number(19), + HL_BLOCKEDBY_INT_ID number(19), + PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID) +) ROWDEPENDENCIES; + +CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID); + +CREATE TABLE NEXT_LOCK_ID ( + NL_NEXT NUMBER(19) NOT NULL +); +INSERT INTO NEXT_LOCK_ID VALUES(1); + +CREATE TABLE COMPACTION_QUEUE ( + CQ_ID NUMBER(19) PRIMARY KEY, + CQ_DATABASE varchar(128) NOT NULL, + CQ_TABLE varchar(256) NOT NULL, + CQ_PARTITION varchar(767), + CQ_STATE char(1) NOT NULL, + CQ_TYPE char(1) NOT NULL, + CQ_TBLPROPERTIES varchar(2048), + CQ_WORKER_ID varchar(128), + CQ_ENQUEUE_TIME NUMBER(19), + CQ_START NUMBER(19), + CQ_RUN_AS varchar(128), + CQ_HIGHEST_WRITE_ID NUMBER(19), + CQ_META_INFO BLOB, + CQ_HADOOP_JOB_ID varchar2(32), + CQ_ERROR_MESSAGE CLOB, + CQ_NEXT_TXN_ID NUMBER(19), + CQ_TXN_ID NUMBER(19), + CQ_COMMIT_TIME NUMBER(19), + CQ_INITIATOR_ID varchar(128), + CQ_INITIATOR_VERSION varchar(128), + CQ_WORKER_VERSION varchar(128), + CQ_CLEANER_START NUMBER(19), + CQ_RETRY_RETENTION NUMBER(19) DEFAULT 0 NOT NULL, + CQ_POOL_NAME varchar(128), + CQ_NUMBER_OF_BUCKETS integer, + CQ_ORDER_BY varchar(4000) +) ROWDEPENDENCIES; + +CREATE TABLE NEXT_COMPACTION_QUEUE_ID ( + NCQ_NEXT NUMBER(19) NOT NULL +); +INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1); + +CREATE TABLE COMPLETED_COMPACTIONS ( + CC_ID NUMBER(19) PRIMARY KEY, + CC_DATABASE varchar(128) NOT NULL, + CC_TABLE varchar(256) NOT NULL, + CC_PARTITION varchar(767), + CC_STATE char(1) NOT NULL, + CC_TYPE char(1) NOT NULL, + CC_TBLPROPERTIES varchar(2048), + CC_WORKER_ID varchar(128), + CC_ENQUEUE_TIME NUMBER(19), + CC_START NUMBER(19), + CC_END NUMBER(19), + CC_RUN_AS varchar(128), + CC_HIGHEST_WRITE_ID NUMBER(19), + CC_META_INFO BLOB, + CC_HADOOP_JOB_ID varchar2(32), + CC_ERROR_MESSAGE CLOB, + CC_NEXT_TXN_ID NUMBER(19), + CC_TXN_ID NUMBER(19), + CC_COMMIT_TIME NUMBER(19), + CC_INITIATOR_ID varchar(128), + CC_INITIATOR_VERSION varchar(128), + CC_WORKER_VERSION varchar(128), + CC_POOL_NAME varchar(128), + CC_NUMBER_OF_BUCKETS integer, + CC_ORDER_BY varchar(4000) +) ROWDEPENDENCIES; + +CREATE INDEX COMPLETED_COMPACTIONS_RES ON COMPLETED_COMPACTIONS (CC_DATABASE,CC_TABLE,CC_PARTITION); + +-- HIVE-25842 +CREATE TABLE COMPACTION_METRICS_CACHE ( + CMC_DATABASE varchar(128) NOT NULL, + CMC_TABLE varchar(256) NOT NULL, + CMC_PARTITION varchar(767), + CMC_METRIC_TYPE varchar(128) NOT NULL, + CMC_METRIC_VALUE number(10) NOT NULL, + CMC_VERSION number(10) NOT NULL +) ROWDEPENDENCIES; + +CREATE TABLE AUX_TABLE ( + MT_KEY1 varchar2(128) NOT NULL, + MT_KEY2 number(19) NOT NULL, + MT_COMMENT varchar2(255), + PRIMARY KEY(MT_KEY1, MT_KEY2) +); + +CREATE TABLE WRITE_SET ( + WS_DATABASE varchar2(128) NOT NULL, + WS_TABLE varchar2(256) NOT NULL, + WS_PARTITION varchar2(767), + WS_TXNID number(19) NOT NULL, + WS_COMMIT_ID number(19) NOT NULL, + WS_OPERATION_TYPE char(1) NOT NULL +); + +CREATE TABLE TXN_TO_WRITE_ID ( + T2W_TXNID NUMBER(19) NOT NULL, + T2W_DATABASE VARCHAR2(128) NOT NULL, + T2W_TABLE 
VARCHAR2(256) NOT NULL, + T2W_WRITEID NUMBER(19) NOT NULL +); + +CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID); +CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID); + +CREATE TABLE NEXT_WRITE_ID ( + NWI_DATABASE VARCHAR2(128) NOT NULL, + NWI_TABLE VARCHAR2(256) NOT NULL, + NWI_NEXT NUMBER(19) NOT NULL +); + +CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE); + +CREATE TABLE MIN_HISTORY_WRITE_ID ( + MH_TXNID NUMBER(19) NOT NULL REFERENCES TXNS (TXN_ID), + MH_DATABASE VARCHAR2(128) NOT NULL, + MH_TABLE VARCHAR2(256) NOT NULL, + MH_WRITEID NUMBER(19) NOT NULL +); + +CREATE TABLE MIN_HISTORY_LEVEL ( + MHL_TXNID NUMBER(19) NOT NULL, + MHL_MIN_OPEN_TXNID NUMBER(19) NOT NULL, + PRIMARY KEY(MHL_TXNID) +); + +CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID); + +CREATE TABLE MATERIALIZATION_REBUILD_LOCKS ( + MRL_TXN_ID NUMBER NOT NULL, + MRL_DB_NAME VARCHAR(128) NOT NULL, + MRL_TBL_NAME VARCHAR(256) NOT NULL, + MRL_LAST_HEARTBEAT NUMBER NOT NULL, + PRIMARY KEY(MRL_TXN_ID) +); + +CREATE TABLE "I_SCHEMA" ( + "SCHEMA_ID" number primary key, + "SCHEMA_TYPE" number not null, + "NAME" varchar2(256) unique, + "DB_ID" number references "DBS" ("DB_ID"), + "COMPATIBILITY" number not null, + "VALIDATION_LEVEL" number not null, + "CAN_EVOLVE" number(1) not null, + "SCHEMA_GROUP" varchar2(256), + "DESCRIPTION" varchar2(4000) +); + +CREATE TABLE "SCHEMA_VERSION" ( + "SCHEMA_VERSION_ID" number primary key, + "SCHEMA_ID" number references "I_SCHEMA" ("SCHEMA_ID"), + "VERSION" number not null, + "CREATED_AT" number not null, + "CD_ID" number references "CDS" ("CD_ID"), + "STATE" number not null, + "DESCRIPTION" varchar2(4000), + "SCHEMA_TEXT" clob, + "FINGERPRINT" varchar2(256), + "SCHEMA_VERSION_NAME" varchar2(256), + "SERDE_ID" number references "SERDES" ("SERDE_ID"), + UNIQUE ("SCHEMA_ID", "VERSION") +); + +CREATE TABLE REPL_TXN_MAP ( + RTM_REPL_POLICY varchar(256) NOT NULL, + RTM_SRC_TXN_ID number(19) NOT NULL, + RTM_TARGET_TXN_ID number(19) NOT NULL, + PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID) +); + +CREATE TABLE RUNTIME_STATS ( + RS_ID NUMBER primary key, + CREATE_TIME NUMBER(10) NOT NULL, + WEIGHT NUMBER(10) NOT NULL, + PAYLOAD BLOB +); + +CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME); + +CREATE TABLE TXN_WRITE_NOTIFICATION_LOG ( + WNL_ID number(19) NOT NULL, + WNL_TXNID number(19) NOT NULL, + WNL_WRITEID number(19) NOT NULL, + WNL_DATABASE varchar(128) NOT NULL, + WNL_TABLE varchar(256) NOT NULL, + WNL_PARTITION varchar(767), + WNL_TABLE_OBJ clob NOT NULL, + WNL_PARTITION_OBJ clob, + WNL_FILES clob, + WNL_EVENT_TIME number(10) NOT NULL +); + +CREATE INDEX TXN_WRITE_NOTIFICATION_LOG_IDX ON TXN_WRITE_NOTIFICATION_LOG (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION); + +INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1); + +CREATE TABLE "SCHEDULED_QUERIES" ( + "SCHEDULED_QUERY_ID" number(19) NOT NULL, + "CLUSTER_NAMESPACE" VARCHAR(256), + "ENABLED" NUMBER(1) NOT NULL CHECK ("ENABLED" IN (1,0)), + "NEXT_EXECUTION" INTEGER, + "QUERY" VARCHAR(4000), + "SCHEDULE" VARCHAR(256), + "SCHEDULE_NAME" VARCHAR(256), + "USER" VARCHAR(256), + "ACTIVE_EXECUTION_ID" number(19), + CONSTRAINT SCHEDULED_QUERIES_PK PRIMARY KEY ("SCHEDULED_QUERY_ID") +); + +CREATE TABLE "SCHEDULED_EXECUTIONS" ( + "SCHEDULED_EXECUTION_ID" number(19) NOT NULL, + "END_TIME" INTEGER, + 
"ERROR_MESSAGE" VARCHAR(2000), + "EXECUTOR_QUERY_ID" VARCHAR(256), + "LAST_UPDATE_TIME" INTEGER, + "SCHEDULED_QUERY_ID" number(19), + "START_TIME" INTEGER, + "STATE" VARCHAR(256), + CONSTRAINT SCHEDULED_EXECUTIONS_PK PRIMARY KEY ("SCHEDULED_EXECUTION_ID"), + CONSTRAINT SCHEDULED_EXECUTIONS_SCHQ_FK FOREIGN KEY ("SCHEDULED_QUERY_ID") REFERENCES "SCHEDULED_QUERIES"("SCHEDULED_QUERY_ID") ON DELETE CASCADE +); + +CREATE INDEX IDX_SCHEDULED_EX_LAST_UPDATE ON "SCHEDULED_EXECUTIONS" ("LAST_UPDATE_TIME"); +CREATE INDEX IDX_SCHEDULED_EX_SQ_ID ON "SCHEDULED_EXECUTIONS" ("SCHEDULED_QUERY_ID"); + +--Create table replication metrics +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" number PRIMARY KEY, + "RM_POLICY" varchar2(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" number NOT NULL, + "RM_METADATA" varchar2(4000), + "RM_PROGRESS" varchar2(4000), + "RM_START_TIME" integer NOT NULL, + "MESSAGE_FORMAT" VARCHAR(16) DEFAULT 'json-0.2' +); + +--Create indexes for the replication metrics table +CREATE INDEX POLICY_IDX ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX DUMP_IDX ON "REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); + +-- Create stored procedure tables +CREATE TABLE "STORED_PROCS" ( + "SP_ID" NUMBER NOT NULL, + "CREATE_TIME" NUMBER(10) NOT NULL, + "DB_ID" NUMBER NOT NULL, + "NAME" VARCHAR(256) NOT NULL, + "OWNER_NAME" VARCHAR(128) NOT NULL, + "SOURCE" NCLOB NOT NULL, + PRIMARY KEY ("SP_ID") +); + +CREATE UNIQUE INDEX UNIQUESTOREDPROC ON STORED_PROCS ("NAME", "DB_ID"); +ALTER TABLE "STORED_PROCS" ADD CONSTRAINT "STOREDPROC_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID"); + +-- Create stored procedure tables +CREATE TABLE "PACKAGES" ( + "PKG_ID" NUMBER NOT NULL, + "CREATE_TIME" NUMBER(10) NOT NULL, + "DB_ID" NUMBER NOT NULL, + "NAME" VARCHAR(256) NOT NULL, + "OWNER_NAME" VARCHAR(128) NOT NULL, + "HEADER" NCLOB NOT NULL, + "BODY" NCLOB NOT NULL, + PRIMARY KEY ("PKG_ID") +); + +CREATE UNIQUE INDEX UNIQUEPKG ON PACKAGES ("NAME", "DB_ID"); +ALTER TABLE "PACKAGES" ADD CONSTRAINT "PACKAGES_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID"); + +-- HIVE-24396 +-- Create DataConnectors and DataConnector_Params tables +CREATE TABLE DATACONNECTORS ( + NAME VARCHAR2(128) NOT NULL, + TYPE VARCHAR2(32) NOT NULL, + URL VARCHAR2(4000) NOT NULL, + "COMMENT" VARCHAR2(256), + OWNER_NAME VARCHAR2(256), + OWNER_TYPE VARCHAR2(10), + CREATE_TIME NUMBER(10) NOT NULL, + PRIMARY KEY (NAME) +); + +CREATE TABLE DATACONNECTOR_PARAMS ( + NAME VARCHAR2(128) NOT NULL, + PARAM_KEY VARCHAR2(180) NOT NULL, + PARAM_VALUE VARCHAR2(4000), + PRIMARY KEY (NAME, PARAM_KEY), + CONSTRAINT DATACONNECTOR_NAME_FK1 FOREIGN KEY (NAME) REFERENCES DATACONNECTORS (NAME) ON DELETE CASCADE +); + +-- Constraints for table DC_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDCPrivilege] +ALTER TABLE DC_PRIVS ADD CONSTRAINT DC_PRIVS_FK1 FOREIGN KEY (NAME) REFERENCES DATACONNECTORS (NAME) INITIALLY DEFERRED ; + +CREATE UNIQUE INDEX DCPRIVILEGEINDEX ON DC_PRIVS (AUTHORIZER,NAME,PRINCIPAL_NAME,PRINCIPAL_TYPE,DC_PRIV,GRANTOR, +GRANTOR_TYPE); + +CREATE INDEX DC_PRIVS_N49 ON DC_PRIVS (NAME); + +-- ----------------------------------------------------------------- +-- Record schema version. 
Should be the last step in the init script +-- ----------------------------------------------------------------- +INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.1.0', 'Hive release version 4.1.0'); diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.oracle.sql index 1e25c1271c1e..781609ee1db1 100644 --- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.oracle.sql +++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.oracle.sql @@ -31,18 +31,22 @@ ALTER TABLE METASTORE_DB_PROPERTIES ADD PROPERTYCONTENT BLOB; -- HIVE-27457 UPDATE "SDS" - SET "SDS"."INPUT_FORMAT" = 'org.apache.hadoop.hive.kudu.KuduInputFormat', - "SDS"."OUTPUT_FORMAT" = 'org.apache.hadoop.hive.kudu.KuduOutputFormat' - WHERE "SDS"."SD_ID" IN ( - SELECT "TBL_ID" FROM "TABLE_PARAMS" WHERE "PARAM_VALUE" LIKE '%KuduStorageHandler%' + SET "INPUT_FORMAT" = 'org.apache.hadoop.hive.kudu.KuduInputFormat', "OUTPUT_FORMAT" = 'org.apache.hadoop.hive.kudu.KuduOutputFormat' + WHERE "SD_ID" IN ( + SELECT "TBLS"."SD_ID" + FROM "TBLS" + INNER JOIN "TABLE_PARAMS" ON "TBLS"."TBL_ID" = "TABLE_PARAMS"."TBL_ID" + WHERE "TABLE_PARAMS"."PARAM_VALUE" LIKE '%KuduStorageHandler%' ); + UPDATE "SERDES" SET "SERDES"."SLIB" = 'org.apache.hadoop.hive.kudu.KuduSerDe' WHERE "SERDE_ID" IN ( SELECT "SDS"."SERDE_ID" FROM "TBLS" - LEFT JOIN "SDS" ON "TBLS"."SD_ID" = "SDS"."SD_ID" - WHERE "TBL_ID" IN (SELECT "TBL_ID" FROM "TABLE_PARAMS" WHERE "PARAM_VALUE" LIKE '%KuduStorageHandler%') + INNER JOIN "SDS" ON "TBLS"."SD_ID" = "SDS"."SD_ID" + INNER JOIN "TABLE_PARAMS" ON "TBLS"."TBL_ID" = "TABLE_PARAMS"."TBL_ID" + WHERE "TABLE_PARAMS"."PARAM_VALUE" LIKE '%KuduStorageHandler%' ); -- These lines need to be last. Insert any changes above. diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.oracle.sql deleted file mode 100644 index 53c7cb140d77..000000000000 --- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.oracle.sql +++ /dev/null @@ -1,10 +0,0 @@ -SELECT 'Upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0-beta-2' AS Status from dual; - - --- HIVE-24815: Remove "IDXS" Table from Metastore Schema -DROP TABLE INDEX_PARAMS; -DROP TABLE IDXS; - --- These lines need to be last. Insert any changes above. 
-UPDATE VERSION SET SCHEMA_VERSION='4.0.0-beta-2', VERSION_COMMENT='Hive release version 4.0.0-beta-2' where VER_ID=1; -SELECT 'Finished upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0-beta-2' AS Status from dual; diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-beta-1-to-4.0.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-beta-1-to-4.0.0.oracle.sql new file mode 100644 index 000000000000..ca98a0914fd6 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-beta-1-to-4.0.0.oracle.sql @@ -0,0 +1,15 @@ +SELECT 'Upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0' AS Status from dual; + + +-- HIVE-24815: Remove "IDXS" Table from Metastore Schema +DROP TABLE INDEX_PARAMS; +DROP TABLE IDXS; + +-- HIVE-27827 +DROP INDEX UNIQUEPARTITION; +CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (TBL_ID, PART_NAME); +DROP INDEX PARTITIONS_N50; + +-- These lines need to be last. Insert any changes above. +UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0' AS Status from dual; diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-to-4.1.0.oracle.sql b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-to-4.1.0.oracle.sql new file mode 100644 index 000000000000..e4c9f9a93439 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade-4.0.0-to-4.1.0.oracle.sql @@ -0,0 +1,5 @@ +SELECT 'Upgrading MetaStore schema from 4.0.0 to 4.1.0' AS Status from dual; + +-- These lines need to be last. Insert any changes above. +UPDATE VERSION SET SCHEMA_VERSION='4.1.0', VERSION_COMMENT='Hive release version 4.1.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 4.0.0 to 4.1.0' AS Status from dual; diff --git a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade.order.oracle b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade.order.oracle index 20b3a4834342..3a18be160d0b 100644 --- a/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade.order.oracle +++ b/standalone-metastore/metastore-server/src/main/sql/oracle/upgrade.order.oracle @@ -15,4 +15,5 @@ 3.2.0-to-4.0.0-alpha-1 4.0.0-alpha-1-to-4.0.0-alpha-2 4.0.0-alpha-2-to-4.0.0-beta-1 -4.0.0-beta-1-to-4.0.0-beta-2 +4.0.0-beta-1-to-4.0.0 +4.0.0-to-4.1.0 diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0-beta-2.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql similarity index 99% rename from standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0-beta-2.postgres.sql rename to standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql index b247bc3fda09..7b09f72da85e 100644 --- a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0-beta-2.postgres.sql +++ b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql @@ -1027,7 +1027,7 @@ ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY K -- ALTER TABLE ONLY "PARTITIONS" - ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID"); + ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("TBL_ID", "PART_NAME"); -- @@ -1135,13 +1135,6 @@ CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("A CREATE INDEX 
"PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME"); --- --- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: --- - -CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID"); - - -- -- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: -- @@ -1973,4 +1966,4 @@ ALTER TABLE ONLY "DC_PRIVS" -- ----------------------------------------------------------------- -- Record schema version. Should be the last step in the init script -- ----------------------------------------------------------------- -INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '4.0.0-beta-2', 'Hive release version 4.0.0-beta-2'); +INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '4.0.0', 'Hive release version 4.0.0'); diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.1.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.1.0.postgres.sql new file mode 100644 index 000000000000..7abb5e787779 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/sql/postgres/hive-schema-4.1.0.postgres.sql @@ -0,0 +1,1969 @@ +-- +-- PostgreSQL database dump +-- + +SET statement_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = off; +SET check_function_bodies = false; +SET client_min_messages = warning; +SET escape_string_warning = off; + +SET search_path = public, pg_catalog; + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "BUCKETING_COLS" ( + "SD_ID" bigint NOT NULL, + "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying, + "INTEGER_IDX" bigint NOT NULL +); + + +-- +-- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "CDS" ( + "CD_ID" bigint NOT NULL +); + + +-- +-- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "COLUMNS_V2" ( + "CD_ID" bigint NOT NULL, + "COMMENT" character varying(4000), + "COLUMN_NAME" character varying(767) NOT NULL, + "TYPE_NAME" text, + "INTEGER_IDX" integer NOT NULL +); + + +-- +-- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "DATABASE_PARAMS" ( + "DB_ID" bigint NOT NULL, + "PARAM_KEY" character varying(180) NOT NULL, + "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying +); + + +CREATE TABLE "CTLGS" ( + "CTLG_ID" BIGINT PRIMARY KEY, + "NAME" VARCHAR(256) UNIQUE, + "DESC" VARCHAR(4000), + "LOCATION_URI" VARCHAR(4000) NOT NULL, + "CREATE_TIME" bigint +); + +-- Insert a default value. The location is TBD. 
Hive will fix this when it starts +INSERT INTO "CTLGS" VALUES (1, 'hive', 'Default catalog for Hive', 'TBD', NULL); + +-- +-- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "DBS" ( + "DB_ID" bigint NOT NULL, + "DESC" character varying(4000) DEFAULT NULL::character varying, + "DB_LOCATION_URI" character varying(4000) NOT NULL, + "NAME" character varying(128) DEFAULT NULL::character varying, + "OWNER_NAME" character varying(128) DEFAULT NULL::character varying, + "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying, + "CTLG_NAME" varchar(256) DEFAULT 'hive' NOT NULL, + "CREATE_TIME" bigint, + "DB_MANAGED_LOCATION_URI" character varying(4000), + "TYPE" character varying(32) DEFAULT 'NATIVE' NOT NULL, + "DATACONNECTOR_NAME" character varying(128), + "REMOTE_DBNAME" character varying(128) +); + + +-- +-- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "DB_PRIVS" ( + "DB_GRANT_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "DB_ID" bigint, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "DB_PRIV" character varying(128) DEFAULT NULL::character varying, + "AUTHORIZER" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: DC_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "DC_PRIVS" ( + "DC_GRANT_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "NAME" character varying(128), + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "DC_PRIV" character varying(128) DEFAULT NULL::character varying, + "AUTHORIZER" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "GLOBAL_PRIVS" ( + "USER_GRANT_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "USER_PRIV" character varying(128) DEFAULT NULL::character varying, + "AUTHORIZER" character varying(128) DEFAULT NULL::character varying +); + +-- +-- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "NUCLEUS_TABLES" ( + "CLASS_NAME" character varying(128) NOT NULL, + "TABLE_NAME" character varying(128) NOT NULL, + "TYPE" character varying(4) NOT NULL, + "OWNER" character varying(2) NOT NULL, + "VERSION" character varying(20) NOT NULL, + "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying +); + + +-- +-- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PARTITIONS" ( + "PART_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "LAST_ACCESS_TIME" bigint NOT NULL, + "PART_NAME" character 
varying(767) DEFAULT NULL::character varying, + "SD_ID" bigint, + "TBL_ID" bigint, + "WRITE_ID" bigint DEFAULT 0 +); + + +-- +-- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PARTITION_EVENTS" ( + "PART_NAME_ID" bigint NOT NULL, + "CAT_NAME" character varying(256), + "DB_NAME" character varying(128), + "EVENT_TIME" bigint NOT NULL, + "EVENT_TYPE" integer NOT NULL, + "PARTITION_NAME" character varying(767), + "TBL_NAME" character varying(256) +); + + +-- +-- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PARTITION_KEYS" ( + "TBL_ID" bigint NOT NULL, + "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying, + "PKEY_NAME" character varying(128) NOT NULL, + "PKEY_TYPE" character varying(767) NOT NULL, + "INTEGER_IDX" bigint NOT NULL +); + + +-- +-- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PARTITION_KEY_VALS" ( + "PART_ID" bigint NOT NULL, + "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying, + "INTEGER_IDX" bigint NOT NULL +); + + +-- +-- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PARTITION_PARAMS" ( + "PART_ID" bigint NOT NULL, + "PARAM_KEY" character varying(256) NOT NULL, + "PARAM_VALUE" text DEFAULT NULL +); + + +-- +-- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PART_COL_PRIVS" ( + "PART_COLUMN_GRANT_ID" bigint NOT NULL, + "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying, + "CREATE_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PART_ID" bigint, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying, + "AUTHORIZER" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PART_PRIVS" ( + "PART_GRANT_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PART_ID" bigint, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "PART_PRIV" character varying(128) DEFAULT NULL::character varying, + "AUTHORIZER" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "ROLES" ( + "ROLE_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "OWNER_NAME" character varying(128) DEFAULT NULL::character varying, + "ROLE_NAME" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "ROLE_MAP" ( + "ROLE_GRANT_ID" bigint NOT NULL, + "ADD_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_NAME" 
character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "ROLE_ID" bigint +); + + +-- +-- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SDS" ( + "SD_ID" bigint NOT NULL, + "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying, + "IS_COMPRESSED" boolean NOT NULL, + "LOCATION" character varying(4000) DEFAULT NULL::character varying, + "NUM_BUCKETS" bigint NOT NULL, + "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying, + "SERDE_ID" bigint, + "CD_ID" bigint, + "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL +); + + +-- +-- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SD_PARAMS" ( + "SD_ID" bigint NOT NULL, + "PARAM_KEY" character varying(256) NOT NULL, + "PARAM_VALUE" text DEFAULT NULL +); + + +-- +-- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SEQUENCE_TABLE" ( + "SEQUENCE_NAME" character varying(255) NOT NULL, + "NEXT_VAL" bigint NOT NULL +); + +INSERT INTO "SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1); + +-- +-- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SERDES" ( + "SERDE_ID" bigint NOT NULL, + "NAME" character varying(128) DEFAULT NULL::character varying, + "SLIB" character varying(4000) DEFAULT NULL::character varying, + "DESCRIPTION" varchar(4000), + "SERIALIZER_CLASS" varchar(4000), + "DESERIALIZER_CLASS" varchar(4000), + "SERDE_TYPE" integer +); + + +-- +-- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SERDE_PARAMS" ( + "SERDE_ID" bigint NOT NULL, + "PARAM_KEY" character varying(256) NOT NULL, + "PARAM_VALUE" text DEFAULT NULL +); + + +-- +-- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "SORT_COLS" ( + "SD_ID" bigint NOT NULL, + "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying, + "ORDER" bigint NOT NULL, + "INTEGER_IDX" bigint NOT NULL +); + + +-- +-- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TABLE_PARAMS" ( + "TBL_ID" bigint NOT NULL, + "PARAM_KEY" character varying(256) NOT NULL, + "PARAM_VALUE" text DEFAULT NULL +); + + +-- +-- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TBLS" ( + "TBL_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "DB_ID" bigint, + "LAST_ACCESS_TIME" bigint NOT NULL, + "OWNER" character varying(767) DEFAULT NULL::character varying, + "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying, + "RETENTION" bigint NOT NULL, + "SD_ID" bigint, + "TBL_NAME" character varying(256) DEFAULT NULL::character varying, + "TBL_TYPE" character varying(128) DEFAULT NULL::character varying, + "VIEW_EXPANDED_TEXT" text, + "VIEW_ORIGINAL_TEXT" text, + "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false, + "WRITE_ID" bigint DEFAULT 0 +); + +-- +-- Name: MV_CREATION_METADATA; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "MV_CREATION_METADATA" ( + "MV_CREATION_METADATA_ID" bigint NOT NULL, + "CAT_NAME" character varying(256) NOT NULL, + "DB_NAME" character varying(128) NOT NULL, + "TBL_NAME" character varying(256) NOT NULL, + "TXN_LIST" text, + "MATERIALIZATION_TIME" bigint NOT NULL +); + +-- +-- Name: MV_TABLES_USED; Type: TABLE; 
Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "MV_TABLES_USED" ( + "MV_CREATION_METADATA_ID" bigint NOT NULL, + "TBL_ID" bigint NOT NULL, + "INSERTED_COUNT" bigint NOT NULL DEFAULT 0, + "UPDATED_COUNT" bigint NOT NULL DEFAULT 0, + "DELETED_COUNT" bigint NOT NULL DEFAULT 0, + CONSTRAINT "MV_TABLES_USED_PK" PRIMARY KEY ("TBL_ID", "MV_CREATION_METADATA_ID") +); + +-- +-- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TBL_COL_PRIVS" ( + "TBL_COLUMN_GRANT_ID" bigint NOT NULL, + "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying, + "CREATE_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying, + "TBL_ID" bigint, + "AUTHORIZER" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TBL_PRIVS" ( + "TBL_GRANT_ID" bigint NOT NULL, + "CREATE_TIME" bigint NOT NULL, + "GRANT_OPTION" smallint NOT NULL, + "GRANTOR" character varying(128) DEFAULT NULL::character varying, + "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying, + "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying, + "TBL_PRIV" character varying(128) DEFAULT NULL::character varying, + "TBL_ID" bigint, + "AUTHORIZER" character varying(128) DEFAULT NULL::character varying +); + + +-- +-- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TYPES" ( + "TYPES_ID" bigint NOT NULL, + "TYPE_NAME" character varying(128) DEFAULT NULL::character varying, + "TYPE1" character varying(767) DEFAULT NULL::character varying, + "TYPE2" character varying(767) DEFAULT NULL::character varying +); + + +-- +-- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "TYPE_FIELDS" ( + "TYPE_NAME" bigint NOT NULL, + "COMMENT" character varying(256) DEFAULT NULL::character varying, + "FIELD_NAME" character varying(128) NOT NULL, + "FIELD_TYPE" character varying(767) NOT NULL, + "INTEGER_IDX" bigint NOT NULL +); + +CREATE TABLE "SKEWED_STRING_LIST" ( + "STRING_LIST_ID" bigint NOT NULL +); + +CREATE TABLE "SKEWED_STRING_LIST_VALUES" ( + "STRING_LIST_ID" bigint NOT NULL, + "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying, + "INTEGER_IDX" bigint NOT NULL +); + +CREATE TABLE "SKEWED_COL_NAMES" ( + "SD_ID" bigint NOT NULL, + "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying, + "INTEGER_IDX" bigint NOT NULL +); + +CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" ( + "SD_ID" bigint NOT NULL, + "STRING_LIST_ID_KID" bigint NOT NULL, + "LOCATION" character varying(4000) DEFAULT NULL::character varying +); + +CREATE TABLE "SKEWED_VALUES" ( + "SD_ID_OID" bigint NOT NULL, + "STRING_LIST_ID_EID" bigint NOT NULL, + "INTEGER_IDX" bigint NOT NULL +); + + +-- +-- Name: TAB_COL_STATS Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "MASTER_KEYS" +( + "KEY_ID" SERIAL, + "MASTER_KEY" varchar(767) NULL, + PRIMARY KEY ("KEY_ID") +); + +CREATE TABLE "DELEGATION_TOKENS" +( + 
"TOKEN_IDENT" varchar(767) NOT NULL, + "TOKEN" varchar(767) NULL, + PRIMARY KEY ("TOKEN_IDENT") +); + +CREATE TABLE "TAB_COL_STATS" ( + "CS_ID" bigint NOT NULL, + "CAT_NAME" character varying(256) DEFAULT NULL::character varying, + "DB_NAME" character varying(128) DEFAULT NULL::character varying, + "TABLE_NAME" character varying(256) DEFAULT NULL::character varying, + "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying, + "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying, + "TBL_ID" bigint NOT NULL, + "LONG_LOW_VALUE" bigint, + "LONG_HIGH_VALUE" bigint, + "DOUBLE_LOW_VALUE" double precision, + "DOUBLE_HIGH_VALUE" double precision, + "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying, + "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying, + "NUM_NULLS" bigint NOT NULL, + "NUM_DISTINCTS" bigint, + "BIT_VECTOR" bytea, + "AVG_COL_LEN" double precision, + "MAX_COL_LEN" bigint, + "NUM_TRUES" bigint, + "NUM_FALSES" bigint, + "LAST_ANALYZED" bigint NOT NULL, + "ENGINE" character varying(128) NOT NULL, + "HISTOGRAM" bytea +); + +-- +-- Table structure for VERSION +-- +CREATE TABLE "VERSION" ( + "VER_ID" bigint, + "SCHEMA_VERSION" character varying(127) NOT NULL, + "VERSION_COMMENT" character varying(255) NOT NULL +); + +-- +-- Name: PART_COL_STATS Type: TABLE; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE TABLE "PART_COL_STATS" ( + "CS_ID" bigint NOT NULL, + "CAT_NAME" character varying(256) DEFAULT NULL::character varying, + "DB_NAME" character varying(128) DEFAULT NULL::character varying, + "TABLE_NAME" character varying(256) DEFAULT NULL::character varying, + "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying, + "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying, + "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying, + "PART_ID" bigint NOT NULL, + "LONG_LOW_VALUE" bigint, + "LONG_HIGH_VALUE" bigint, + "DOUBLE_LOW_VALUE" double precision, + "DOUBLE_HIGH_VALUE" double precision, + "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying, + "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying, + "NUM_NULLS" bigint NOT NULL, + "NUM_DISTINCTS" bigint, + "BIT_VECTOR" bytea, + "AVG_COL_LEN" double precision, + "MAX_COL_LEN" bigint, + "NUM_TRUES" bigint, + "NUM_FALSES" bigint, + "LAST_ANALYZED" bigint NOT NULL, + "ENGINE" character varying(128) NOT NULL, + "HISTOGRAM" bytea +); + +-- +-- Table structure for FUNCS +-- +CREATE TABLE "FUNCS" ( + "FUNC_ID" BIGINT NOT NULL, + "CLASS_NAME" VARCHAR(4000), + "CREATE_TIME" INTEGER NOT NULL, + "DB_ID" BIGINT, + "FUNC_NAME" VARCHAR(128), + "FUNC_TYPE" INTEGER NOT NULL, + "OWNER_NAME" VARCHAR(128), + "OWNER_TYPE" VARCHAR(10), + PRIMARY KEY ("FUNC_ID") +); + +-- +-- Table structure for FUNC_RU +-- +CREATE TABLE "FUNC_RU" ( + "FUNC_ID" BIGINT NOT NULL, + "RESOURCE_TYPE" INTEGER NOT NULL, + "RESOURCE_URI" VARCHAR(4000), + "INTEGER_IDX" INTEGER NOT NULL, + PRIMARY KEY ("FUNC_ID", "INTEGER_IDX") +); + +CREATE TABLE "NOTIFICATION_LOG" +( + "NL_ID" BIGINT NOT NULL, + "EVENT_ID" BIGINT NOT NULL, + "EVENT_TIME" INTEGER NOT NULL, + "EVENT_TYPE" VARCHAR(32) NOT NULL, + "CAT_NAME" VARCHAR(256), + "DB_NAME" VARCHAR(128), + "TBL_NAME" VARCHAR(256), + "MESSAGE" text, + "MESSAGE_FORMAT" VARCHAR(16), + PRIMARY KEY ("NL_ID") +); + +CREATE UNIQUE INDEX "NOTIFICATION_LOG_EVENT_ID" ON "NOTIFICATION_LOG" USING btree ("EVENT_ID"); + +CREATE TABLE "NOTIFICATION_SEQUENCE" +( + 
"NNI_ID" BIGINT NOT NULL, + "NEXT_EVENT_ID" BIGINT NOT NULL, + PRIMARY KEY ("NNI_ID") +); + +INSERT INTO "NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT 1,1 WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "NOTIFICATION_SEQUENCE"); + +CREATE TABLE "KEY_CONSTRAINTS" +( + "CHILD_CD_ID" BIGINT, + "CHILD_INTEGER_IDX" BIGINT, + "CHILD_TBL_ID" BIGINT, + "PARENT_CD_ID" BIGINT, + "PARENT_INTEGER_IDX" BIGINT NOT NULL, + "PARENT_TBL_ID" BIGINT NOT NULL, + "POSITION" BIGINT NOT NULL, + "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, + "CONSTRAINT_TYPE" SMALLINT NOT NULL, + "UPDATE_RULE" SMALLINT, + "DELETE_RULE" SMALLINT, + "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL, + "DEFAULT_VALUE" VARCHAR(400), + CONSTRAINT CONSTRAINTS_PK PRIMARY KEY ("PARENT_TBL_ID", "CONSTRAINT_NAME", "POSITION") +) ; + +--- +--- Table structure for METASTORE_DB_PROPERTIES +--- +CREATE TABLE "METASTORE_DB_PROPERTIES" +( + "PROPERTY_KEY" VARCHAR(255) NOT NULL, + "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, + "DESCRIPTION" VARCHAR(1000), + "PROPERTYCONTENT" bytea +); + + +CREATE TABLE "WM_RESOURCEPLAN" ( + "RP_ID" bigint NOT NULL, + "NAME" character varying(128) NOT NULL, + "NS" character varying(128), + "QUERY_PARALLELISM" integer, + "STATUS" character varying(20) NOT NULL, + "DEFAULT_POOL_ID" bigint +); + +CREATE TABLE "WM_POOL" ( + "POOL_ID" bigint NOT NULL, + "RP_ID" bigint NOT NULL, + "PATH" character varying(1024) NOT NULL, + "ALLOC_FRACTION" double precision, + "QUERY_PARALLELISM" integer, + "SCHEDULING_POLICY" character varying(1024) +); + +CREATE TABLE "WM_TRIGGER" ( + "TRIGGER_ID" bigint NOT NULL, + "RP_ID" bigint NOT NULL, + "NAME" character varying(128) NOT NULL, + "TRIGGER_EXPRESSION" character varying(1024) DEFAULT NULL::character varying, + "ACTION_EXPRESSION" character varying(1024) DEFAULT NULL::character varying, + "IS_IN_UNMANAGED" smallint NOT NULL DEFAULT 0 +); + +CREATE TABLE "WM_POOL_TO_TRIGGER" ( + "POOL_ID" bigint NOT NULL, + "TRIGGER_ID" bigint NOT NULL +); + +CREATE TABLE "WM_MAPPING" ( + "MAPPING_ID" bigint NOT NULL, + "RP_ID" bigint NOT NULL, + "ENTITY_TYPE" character varying(128) NOT NULL, + "ENTITY_NAME" character varying(128) NOT NULL, + "POOL_ID" bigint, + "ORDERING" integer +); + +-- +-- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "BUCKETING_COLS" + ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + + +-- +-- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "CDS" + ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID"); + + +-- +-- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "COLUMNS_V2" + ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME"); + + +-- +-- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DATABASE_PARAMS" + ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY"); + + +-- +-- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DB_PRIVS" + ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("AUTHORIZER", "DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE"); + +-- +-- Name: DCPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DC_PRIVS" + ADD CONSTRAINT "DCPRIVILEGEINDEX" UNIQUE ("AUTHORIZER", "NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DC_PRIV", 
"GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DBS" + ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID"); + + +-- +-- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DB_PRIVS" + ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID"); + + +-- +-- Name: DC_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DC_PRIVS" + ADD CONSTRAINT "DC_PRIVS_pkey" PRIMARY KEY ("DC_GRANT_ID"); + + +-- +-- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "GLOBAL_PRIVS" + ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("AUTHORIZER", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "GLOBAL_PRIVS" + ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID"); + + +-- +-- Name: ONE_ROW_CONSTRAINT; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE "NOTIFICATION_SEQUENCE" + ADD CONSTRAINT "ONE_ROW_CONSTRAINT" CHECK ("NNI_ID" = 1); + + +-- +-- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "NUCLEUS_TABLES" + ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME"); + + +-- +-- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITIONS" + ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID"); + + +-- +-- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITION_EVENTS" + ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID"); + + +-- +-- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITION_KEYS" + ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME"); + + +-- +-- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITION_KEY_VALS" + ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX"); + + +-- +-- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PARTITION_PARAMS" + ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY"); + + +-- +-- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PART_COL_PRIVS" + ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID"); + + +-- +-- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "PART_PRIVS" + ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID"); + + +-- +-- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "ROLES" + ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME"); + + +-- +-- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "ROLES" + ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID"); + + +-- +-- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "ROLE_MAP" + ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY 
("ROLE_GRANT_ID"); + + +-- +-- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SDS" + ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID"); + + +-- +-- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SD_PARAMS" + ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY"); + + +-- +-- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SEQUENCE_TABLE" + ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME"); + + +-- +-- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SERDES" + ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID"); + + +-- +-- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SERDE_PARAMS" + ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY"); + + +-- +-- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "SORT_COLS" + ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + + +-- +-- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TABLE_PARAMS" + ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY"); + + +-- +-- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TBLS" + ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID"); + + +-- +-- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TBL_COL_PRIVS" + ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID"); + + +-- +-- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TBL_PRIVS" + ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID"); + + +-- +-- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TYPES" + ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID"); + + +-- +-- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TYPE_FIELDS" + ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME"); + +ALTER TABLE ONLY "SKEWED_STRING_LIST" + ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID"); + +ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES" + ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX"); + + +ALTER TABLE ONLY "SKEWED_COL_NAMES" + ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX"); + +ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP" + ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID"); + +ALTER TABLE ONLY "SKEWED_VALUES" + ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX"); + +-- +-- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- +ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID"); + +-- +-- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- +ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID"); + + +-- +-- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER 
TABLE ONLY "PARTITIONS" + ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("TBL_ID", "PART_NAME"); + + +-- +-- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TBLS" + ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID"); + + +-- +-- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "DBS" + ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME", "CTLG_NAME"); + + +-- +-- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "TYPES" + ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME"); + + +-- +-- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: +-- + +ALTER TABLE ONLY "ROLE_MAP" + ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE"); + +ALTER TABLE ONLY "METASTORE_DB_PROPERTIES" + ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY"); + + +-- Resource plan: Primary key and unique key constraints. +ALTER TABLE ONLY "WM_RESOURCEPLAN" + ADD CONSTRAINT "WM_RESOURCEPLAN_pkey" PRIMARY KEY ("RP_ID"); + +ALTER TABLE ONLY "WM_RESOURCEPLAN" + ADD CONSTRAINT "UNIQUE_WM_RESOURCEPLAN" UNIQUE ("NS", "NAME"); + +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "WM_POOL_pkey" PRIMARY KEY ("POOL_ID"); + +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "UNIQUE_WM_POOL" UNIQUE ("RP_ID", "PATH"); + +ALTER TABLE ONLY "WM_TRIGGER" + ADD CONSTRAINT "WM_TRIGGER_pkey" PRIMARY KEY ("TRIGGER_ID"); + +ALTER TABLE ONLY "WM_TRIGGER" + ADD CONSTRAINT "UNIQUE_WM_TRIGGER" UNIQUE ("RP_ID", "NAME"); + +ALTER TABLE ONLY "WM_POOL_TO_TRIGGER" + ADD CONSTRAINT "WM_POOL_TO_TRIGGER_pkey" PRIMARY KEY ("POOL_ID", "TRIGGER_ID"); + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "WM_MAPPING_pkey" PRIMARY KEY ("MAPPING_ID"); + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "UNIQUE_WM_MAPPING" UNIQUE ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME"); + +-- +-- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID"); + + +-- +-- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID"); + + +-- +-- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID"); + + +-- +-- Name: DC_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "DC_PRIVS_N49" ON "DC_PRIVS" USING btree ("NAME"); + + +-- +-- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("AUTHORIZER", "PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME"); + + +-- +-- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID"); + + +-- +-- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID"); + + +-- +-- Name: PARTITION_KEY_VALS_N49; Type: 
INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID"); + + +-- +-- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID"); + + +-- +-- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("AUTHORIZER", "PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID"); + + +-- +-- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID"); + + +-- +-- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME","CAT_NAME"); + + +-- +-- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID"); + + +-- +-- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID"); + +-- +-- Name: SDS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "SDS_N50" ON "SDS" USING btree ("CD_ID"); + +-- +-- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID"); + + +-- +-- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID"); + + +-- +-- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID"); + + +-- +-- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("AUTHORIZER", "TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("AUTHORIZER", "TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE"); + + +-- +-- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID"); + + +-- +-- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID"); + + +-- +-- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID"); + + +-- +-- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID"); + +-- +-- Name: TAB_COL_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TAB_COL_STATS_IDX" ON "TAB_COL_STATS" USING btree 
("DB_NAME","TABLE_NAME","COLUMN_NAME","CAT_NAME"); + +-- +-- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID"); + + +-- +-- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME"); + +-- +-- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID"); + +-- +-- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID"); + +-- +-- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID"); + +-- +-- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID"); + +-- +-- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID"); + +CREATE INDEX "CONSTRAINTS_PARENT_TBLID_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("PARENT_TBL_ID"); + +CREATE INDEX "CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("CONSTRAINT_TYPE"); + +ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES" + ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE; + + +ALTER TABLE ONLY "SKEWED_COL_NAMES" + ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP" + ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + +ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP" + ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE; + +ALTER TABLE ONLY "SKEWED_VALUES" + ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE; + +ALTER TABLE ONLY "SKEWED_VALUES" + ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "BUCKETING_COLS" + ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "COLUMNS_V2" + ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE; + + +-- +-- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "DATABASE_PARAMS" + ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE; + + +-- +-- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "DB_PRIVS" + ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE; + + +-- +-- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PARTITIONS" + ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") 
REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PARTITIONS" + ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PARTITION_KEYS" + ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PARTITION_KEY_VALS" + ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; + + +-- +-- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PARTITION_PARAMS" + ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; + + +-- +-- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PART_COL_PRIVS" + ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; + + +-- +-- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "PART_PRIVS" + ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; + + +-- +-- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "ROLE_MAP" + ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE; + + +-- +-- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "SDS" + ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE; + + +-- +-- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "SDS" + ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE; + + +-- +-- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "SD_PARAMS" + ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "SERDE_PARAMS" + ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE; + + +-- +-- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "SORT_COLS" + ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TABLE_PARAMS" + ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TBLS" + ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE; + + +-- +-- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TBLS" + ADD CONSTRAINT 
"TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE; + + +-- +-- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TBL_COL_PRIVS" + ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TBL_PRIVS" + ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- + +ALTER TABLE ONLY "TYPE_FIELDS" + ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE; + +-- +-- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- +ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE; + + +-- +-- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +-- +ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE; + +ALTER TABLE "DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "CTLGS" ("NAME"); + +ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID"); + +-- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +ALTER TABLE ONLY "FUNCS" + ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE; + +-- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser +ALTER TABLE ONLY "FUNC_RU" + ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE; + +-- Resource plan FK constraints. 
+ +ALTER TABLE ONLY "WM_POOL" + ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_RESOURCEPLAN" + ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_TRIGGER" + ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_POOL_TO_TRIGGER" + ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_POOL_TO_TRIGGER" + ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "WM_TRIGGER" ("TRIGGER_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE; + +ALTER TABLE ONLY "WM_MAPPING" + ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE; + +ALTER TABLE ONLY "MV_CREATION_METADATA" + ADD CONSTRAINT "MV_CREATION_METADATA_PK" PRIMARY KEY ("MV_CREATION_METADATA_ID"); + +CREATE INDEX "MV_UNIQUE_TABLE" + ON "MV_CREATION_METADATA" USING btree ("TBL_NAME", "DB_NAME"); + +ALTER TABLE ONLY "MV_TABLES_USED" + ADD CONSTRAINT "MV_TABLES_USED_FK1" FOREIGN KEY ("MV_CREATION_METADATA_ID") REFERENCES "MV_CREATION_METADATA" ("MV_CREATION_METADATA_ID") DEFERRABLE; + +ALTER TABLE ONLY "MV_TABLES_USED" + ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS" ("TBL_ID") DEFERRABLE; + +-- +-- Name: public; Type: ACL; Schema: -; Owner: hiveuser +-- + +REVOKE ALL ON SCHEMA public FROM PUBLIC; +GRANT ALL ON SCHEMA public TO PUBLIC; + +-- +-- PostgreSQL database dump complete +-- + +------------------------------ +-- Transaction and lock tables +------------------------------ +CREATE TABLE "TXNS" ( + "TXN_ID" bigserial PRIMARY KEY, + "TXN_STATE" char(1) NOT NULL, + "TXN_STARTED" bigint NOT NULL, + "TXN_LAST_HEARTBEAT" bigint NOT NULL, + "TXN_USER" varchar(128) NOT NULL, + "TXN_HOST" varchar(128) NOT NULL, + "TXN_AGENT_INFO" varchar(128), + "TXN_META_INFO" varchar(128), + "TXN_HEARTBEAT_COUNT" integer, + "TXN_TYPE" integer +); +INSERT INTO "TXNS" ("TXN_ID", "TXN_STATE", "TXN_STARTED", "TXN_LAST_HEARTBEAT", "TXN_USER", "TXN_HOST") + VALUES(0, 'c', 0, 0, '', ''); + +CREATE TABLE "TXN_COMPONENTS" ( + "TC_TXNID" bigint NOT NULL REFERENCES "TXNS" ("TXN_ID"), + "TC_DATABASE" varchar(128) NOT NULL, + "TC_TABLE" varchar(256), + "TC_PARTITION" varchar(767) DEFAULT NULL, + "TC_OPERATION_TYPE" char(1) NOT NULL, + "TC_WRITEID" bigint +); + +CREATE INDEX TC_TXNID_INDEX ON "TXN_COMPONENTS" USING hash ("TC_TXNID"); + +CREATE TABLE "COMPLETED_TXN_COMPONENTS" ( + "CTC_TXNID" bigint NOT NULL, + "CTC_DATABASE" varchar(128) NOT NULL, + "CTC_TABLE" varchar(256), + "CTC_PARTITION" varchar(767), + "CTC_TIMESTAMP" timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL, + "CTC_WRITEID" bigint, + "CTC_UPDATE_DELETE" char(1) NOT NULL +); + +CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON "COMPLETED_TXN_COMPONENTS" USING btree ("CTC_DATABASE", "CTC_TABLE", "CTC_PARTITION"); + +CREATE TABLE "TXN_LOCK_TBL" ( + "TXN_LOCK" bigint NOT NULL +); +INSERT INTO "TXN_LOCK_TBL" VALUES(1); + +CREATE TABLE "HIVE_LOCKS" ( + "HL_LOCK_EXT_ID" bigint NOT NULL, + "HL_LOCK_INT_ID" bigint NOT NULL, + "HL_TXNID" bigint NOT NULL, + "HL_DB" varchar(128) NOT NULL, + "HL_TABLE" varchar(256), + "HL_PARTITION" varchar(767) DEFAULT NULL, + "HL_LOCK_STATE" char(1) NOT NULL, + "HL_LOCK_TYPE" char(1) 
NOT NULL, + "HL_LAST_HEARTBEAT" bigint NOT NULL, + "HL_ACQUIRED_AT" bigint, + "HL_USER" varchar(128) NOT NULL, + "HL_HOST" varchar(128) NOT NULL, + "HL_HEARTBEAT_COUNT" integer, + "HL_AGENT_INFO" varchar(128), + "HL_BLOCKEDBY_EXT_ID" bigint, + "HL_BLOCKEDBY_INT_ID" bigint, + PRIMARY KEY("HL_LOCK_EXT_ID", "HL_LOCK_INT_ID") +); + +CREATE INDEX HL_TXNID_INDEX ON "HIVE_LOCKS" USING hash ("HL_TXNID"); + +CREATE TABLE "NEXT_LOCK_ID" ( + "NL_NEXT" bigint NOT NULL +); +INSERT INTO "NEXT_LOCK_ID" VALUES(1); + +CREATE TABLE "COMPACTION_QUEUE" ( + "CQ_ID" bigint PRIMARY KEY, + "CQ_DATABASE" varchar(128) NOT NULL, + "CQ_TABLE" varchar(256) NOT NULL, + "CQ_PARTITION" varchar(767), + "CQ_STATE" char(1) NOT NULL, + "CQ_TYPE" char(1) NOT NULL, + "CQ_TBLPROPERTIES" varchar(2048), + "CQ_WORKER_ID" varchar(128), + "CQ_ENQUEUE_TIME" bigint, + "CQ_START" bigint, + "CQ_RUN_AS" varchar(128), + "CQ_HIGHEST_WRITE_ID" bigint, + "CQ_META_INFO" bytea, + "CQ_HADOOP_JOB_ID" varchar(32), + "CQ_ERROR_MESSAGE" text, + "CQ_NEXT_TXN_ID" bigint, + "CQ_TXN_ID" bigint, + "CQ_COMMIT_TIME" bigint, + "CQ_INITIATOR_ID" varchar(128), + "CQ_INITIATOR_VERSION" varchar(128), + "CQ_WORKER_VERSION" varchar(128), + "CQ_CLEANER_START" bigint, + "CQ_RETRY_RETENTION" bigint not null default 0, + "CQ_POOL_NAME" varchar(128), + "CQ_NUMBER_OF_BUCKETS" integer, + "CQ_ORDER_BY" varchar(4000) +); + +CREATE TABLE "NEXT_COMPACTION_QUEUE_ID" ( + "NCQ_NEXT" bigint NOT NULL +); +INSERT INTO "NEXT_COMPACTION_QUEUE_ID" VALUES(1); + +CREATE TABLE "COMPLETED_COMPACTIONS" ( + "CC_ID" bigint PRIMARY KEY, + "CC_DATABASE" varchar(128) NOT NULL, + "CC_TABLE" varchar(256) NOT NULL, + "CC_PARTITION" varchar(767), + "CC_STATE" char(1) NOT NULL, + "CC_TYPE" char(1) NOT NULL, + "CC_TBLPROPERTIES" varchar(2048), + "CC_WORKER_ID" varchar(128), + "CC_ENQUEUE_TIME" bigint, + "CC_START" bigint, + "CC_END" bigint, + "CC_RUN_AS" varchar(128), + "CC_HIGHEST_WRITE_ID" bigint, + "CC_META_INFO" bytea, + "CC_HADOOP_JOB_ID" varchar(32), + "CC_ERROR_MESSAGE" text, + "CC_NEXT_TXN_ID" bigint, + "CC_TXN_ID" bigint, + "CC_COMMIT_TIME" bigint, + "CC_INITIATOR_ID" varchar(128), + "CC_INITIATOR_VERSION" varchar(128), + "CC_WORKER_VERSION" varchar(128), + "CC_POOL_NAME" varchar(128), + "CC_NUMBER_OF_BUCKETS" integer, + "CC_ORDER_BY" varchar(4000) +); + +CREATE INDEX "COMPLETED_COMPACTIONS_RES" ON "COMPLETED_COMPACTIONS" ("CC_DATABASE","CC_TABLE","CC_PARTITION"); + +-- HIVE-25842 +CREATE TABLE "COMPACTION_METRICS_CACHE" ( + "CMC_DATABASE" varchar(128) NOT NULL, + "CMC_TABLE" varchar(256) NOT NULL, + "CMC_PARTITION" varchar(767), + "CMC_METRIC_TYPE" varchar(128) NOT NULL, + "CMC_METRIC_VALUE" integer NOT NULL, + "CMC_VERSION" integer NOT NULL +); + +CREATE TABLE "AUX_TABLE" ( + "MT_KEY1" varchar(128) NOT NULL, + "MT_KEY2" bigint NOT NULL, + "MT_COMMENT" varchar(255), + PRIMARY KEY("MT_KEY1", "MT_KEY2") +); + +CREATE TABLE "WRITE_SET" ( + "WS_DATABASE" varchar(128) NOT NULL, + "WS_TABLE" varchar(256) NOT NULL, + "WS_PARTITION" varchar(767), + "WS_TXNID" bigint NOT NULL, + "WS_COMMIT_ID" bigint NOT NULL, + "WS_OPERATION_TYPE" char(1) NOT NULL +); + +CREATE TABLE "TXN_TO_WRITE_ID" ( + "T2W_TXNID" bigint NOT NULL, + "T2W_DATABASE" varchar(128) NOT NULL, + "T2W_TABLE" varchar(256) NOT NULL, + "T2W_WRITEID" bigint NOT NULL +); + +CREATE UNIQUE INDEX "TBL_TO_TXN_ID_IDX" ON "TXN_TO_WRITE_ID" ("T2W_DATABASE", "T2W_TABLE", "T2W_TXNID"); +CREATE UNIQUE INDEX "TBL_TO_WRITE_ID_IDX" ON "TXN_TO_WRITE_ID" ("T2W_DATABASE", "T2W_TABLE", "T2W_WRITEID"); + +CREATE TABLE "NEXT_WRITE_ID" ( + "NWI_DATABASE" 
varchar(128) NOT NULL, + "NWI_TABLE" varchar(256) NOT NULL, + "NWI_NEXT" bigint NOT NULL +); + +CREATE UNIQUE INDEX "NEXT_WRITE_ID_IDX" ON "NEXT_WRITE_ID" ("NWI_DATABASE", "NWI_TABLE"); + +CREATE TABLE "MIN_HISTORY_WRITE_ID" ( + "MH_TXNID" bigint NOT NULL REFERENCES "TXNS" ("TXN_ID"), + "MH_DATABASE" varchar(128) NOT NULL, + "MH_TABLE" varchar(256) NOT NULL, + "MH_WRITEID" bigint NOT NULL +); + +CREATE TABLE "MIN_HISTORY_LEVEL" ( + "MHL_TXNID" bigint NOT NULL, + "MHL_MIN_OPEN_TXNID" bigint NOT NULL, + PRIMARY KEY("MHL_TXNID") +); + +CREATE INDEX "MIN_HISTORY_LEVEL_IDX" ON "MIN_HISTORY_LEVEL" ("MHL_MIN_OPEN_TXNID"); + +CREATE TABLE "MATERIALIZATION_REBUILD_LOCKS" ( + "MRL_TXN_ID" bigint NOT NULL, + "MRL_DB_NAME" varchar(128) NOT NULL, + "MRL_TBL_NAME" varchar(256) NOT NULL, + "MRL_LAST_HEARTBEAT" bigint NOT NULL, + PRIMARY KEY("MRL_TXN_ID") +); + +CREATE TABLE "I_SCHEMA" ( + "SCHEMA_ID" bigint primary key, + "SCHEMA_TYPE" integer not null, + "NAME" varchar(256) unique, + "DB_ID" bigint references "DBS" ("DB_ID"), + "COMPATIBILITY" integer not null, + "VALIDATION_LEVEL" integer not null, + "CAN_EVOLVE" boolean not null, + "SCHEMA_GROUP" varchar(256), + "DESCRIPTION" varchar(4000) +); + +CREATE TABLE "SCHEMA_VERSION" ( + "SCHEMA_VERSION_ID" bigint primary key, + "SCHEMA_ID" bigint references "I_SCHEMA" ("SCHEMA_ID"), + "VERSION" integer not null, + "CREATED_AT" bigint not null, + "CD_ID" bigint references "CDS" ("CD_ID"), + "STATE" integer not null, + "DESCRIPTION" varchar(4000), + "SCHEMA_TEXT" text, + "FINGERPRINT" varchar(256), + "SCHEMA_VERSION_NAME" varchar(256), + "SERDE_ID" bigint references "SERDES" ("SERDE_ID"), + unique ("SCHEMA_ID", "VERSION") +); + +CREATE TABLE "REPL_TXN_MAP" ( + "RTM_REPL_POLICY" varchar(256) NOT NULL, + "RTM_SRC_TXN_ID" bigint NOT NULL, + "RTM_TARGET_TXN_ID" bigint NOT NULL, + PRIMARY KEY ("RTM_REPL_POLICY", "RTM_SRC_TXN_ID") +); + + +CREATE TABLE "RUNTIME_STATS" ( + "RS_ID" bigint primary key, + "CREATE_TIME" bigint NOT NULL, + "WEIGHT" bigint NOT NULL, + "PAYLOAD" bytea +); + +CREATE INDEX "IDX_RUNTIME_STATS_CREATE_TIME" ON "RUNTIME_STATS" ("CREATE_TIME"); + + + +CREATE TABLE "TXN_WRITE_NOTIFICATION_LOG" ( + "WNL_ID" bigint NOT NULL, + "WNL_TXNID" bigint NOT NULL, + "WNL_WRITEID" bigint NOT NULL, + "WNL_DATABASE" varchar(128) NOT NULL, + "WNL_TABLE" varchar(256) NOT NULL, + "WNL_PARTITION" varchar(767) NOT NULL, + "WNL_TABLE_OBJ" text NOT NULL, + "WNL_PARTITION_OBJ" text, + "WNL_FILES" text, + "WNL_EVENT_TIME" integer NOT NULL, + PRIMARY KEY ("WNL_TXNID", "WNL_DATABASE", "WNL_TABLE", "WNL_PARTITION") +); + +INSERT INTO "SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1); + +CREATE TABLE "SCHEDULED_QUERIES" ( + "SCHEDULED_QUERY_ID" BIGINT NOT NULL, + "CLUSTER_NAMESPACE" VARCHAR(256), + "ENABLED" boolean NOT NULL, + "NEXT_EXECUTION" INTEGER, + "QUERY" VARCHAR(4000), + "SCHEDULE" VARCHAR(256), + "SCHEDULE_NAME" VARCHAR(256), + "USER" VARCHAR(256), + "ACTIVE_EXECUTION_ID" BIGINT, + CONSTRAINT "SCHEDULED_QUERIES_PK" PRIMARY KEY ("SCHEDULED_QUERY_ID") +); + +CREATE TABLE "SCHEDULED_EXECUTIONS" ( + "SCHEDULED_EXECUTION_ID" BIGINT NOT NULL, + "END_TIME" INTEGER, + "ERROR_MESSAGE" VARCHAR(2000), + "EXECUTOR_QUERY_ID" VARCHAR(256), + "LAST_UPDATE_TIME" INTEGER, + "SCHEDULED_QUERY_ID" BIGINT, + "START_TIME" INTEGER, + "STATE" VARCHAR(256), + CONSTRAINT "SCHEDULED_EXECUTIONS_PK" PRIMARY KEY ("SCHEDULED_EXECUTION_ID"), + CONSTRAINT "SCHEDULED_EXECUTIONS_SCHQ_FK" FOREIGN KEY ("SCHEDULED_QUERY_ID") 
REFERENCES "SCHEDULED_QUERIES"("SCHEDULED_QUERY_ID") ON DELETE CASCADE +); + +CREATE INDEX IDX_SCHEDULED_EXECUTIONS_LAST_UPDATE_TIME ON "SCHEDULED_EXECUTIONS" ("LAST_UPDATE_TIME"); +CREATE INDEX IDX_SCHEDULED_EXECUTIONS_SCHEDULED_QUERY_ID ON "SCHEDULED_EXECUTIONS" ("SCHEDULED_QUERY_ID"); +CREATE UNIQUE INDEX UNIQUE_SCHEDULED_EXECUTIONS_ID ON "SCHEDULED_EXECUTIONS" ("SCHEDULED_EXECUTION_ID"); + +--Create table replication metrics +CREATE TABLE "REPLICATION_METRICS" ( + "RM_SCHEDULED_EXECUTION_ID" bigint NOT NULL, + "RM_POLICY" varchar(256) NOT NULL, + "RM_DUMP_EXECUTION_ID" bigint NOT NULL, + "RM_METADATA" varchar(4000), + "RM_PROGRESS" varchar(10000), + "RM_START_TIME" integer NOT NULL, + "MESSAGE_FORMAT" VARCHAR(16) DEFAULT 'json-0.2', + PRIMARY KEY("RM_SCHEDULED_EXECUTION_ID") +); + +--Create indexes for the replication metrics table +CREATE INDEX "POLICY_IDX" ON "REPLICATION_METRICS" ("RM_POLICY"); +CREATE INDEX "DUMP_IDX" ON "REPLICATION_METRICS" ("RM_DUMP_EXECUTION_ID"); + +-- Create stored procedure tables + CREATE TABLE "STORED_PROCS" ( + "SP_ID" BIGINT NOT NULL, + "CREATE_TIME" INTEGER NOT NULL, + "DB_ID" BIGINT NOT NULL, + "NAME" VARCHAR(256) NOT NULL, + "OWNER_NAME" VARCHAR(128) NOT NULL, + "SOURCE" TEXT NOT NULL, + PRIMARY KEY ("SP_ID") +); + +CREATE UNIQUE INDEX "UNIQUESTOREDPROC" ON "STORED_PROCS" ("NAME", "DB_ID"); +ALTER TABLE ONLY "STORED_PROCS" ADD CONSTRAINT "STOREDPROC_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE; + +-- Create stored procedure tables +CREATE TABLE "PACKAGES" ( + "PKG_ID" BIGINT NOT NULL, + "CREATE_TIME" INTEGER NOT NULL, + "DB_ID" BIGINT NOT NULL, + "NAME" VARCHAR(256) NOT NULL, + "OWNER_NAME" VARCHAR(128) NOT NULL, + "HEADER" TEXT NOT NULL, + "BODY" TEXT NOT NULL, + PRIMARY KEY ("PKG_ID") +); + +CREATE UNIQUE INDEX "UNIQUEPKG" ON "PACKAGES" ("NAME", "DB_ID"); +ALTER TABLE ONLY "PACKAGES" ADD CONSTRAINT "PACKAGES_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE; + +-- HIVE-24396 +-- Create DataConnectors and DataConnector_Params tables +CREATE TABLE "DATACONNECTORS" ( + "NAME" character varying(128) NOT NULL, + "TYPE" character varying(32) NOT NULL, + "URL" character varying(4000) NOT NULL, + "COMMENT" character varying(256), + "OWNER_NAME" character varying(256), + "OWNER_TYPE" character varying(10), + "CREATE_TIME" INTEGER NOT NULL, + PRIMARY KEY ("NAME") +); + +CREATE TABLE "DATACONNECTOR_PARAMS" ( + "NAME" character varying(128) NOT NULL, + "PARAM_KEY" character varying(180) NOT NULL, + "PARAM_VALUE" character varying(4000), + PRIMARY KEY ("NAME", "PARAM_KEY"), + CONSTRAINT "DATACONNECTOR_NAME_FK1" FOREIGN KEY ("NAME") REFERENCES "DATACONNECTORS"("NAME") ON DELETE CASCADE +); + +ALTER TABLE ONLY "DC_PRIVS" + ADD CONSTRAINT "DC_PRIVS_DC_ID_fkey" FOREIGN KEY ("NAME") REFERENCES "DATACONNECTORS"("NAME") DEFERRABLE; + +-- ----------------------------------------------------------------- +-- Record schema version. 
Should be the last step in the init script +-- ----------------------------------------------------------------- +INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '4.1.0', 'Hive release version 4.1.0'); diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.postgres.sql index fd6459e234a9..d6a3162d842e 100644 --- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.postgres.sql +++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-alpha-2-to-4.0.0-beta-1.postgres.sql @@ -31,18 +31,22 @@ ALTER TABLE "METASTORE_DB_PROPERTIES" ADD "PROPERTYCONTENT" bytea; -- HIVE-27457 UPDATE "SDS" - SET "INPUT_FORMAT" = 'org.apache.hadoop.hive.kudu.KuduInputFormat', - "OUTPUT_FORMAT" = 'org.apache.hadoop.hive.kudu.KuduOutputFormat' - WHERE "SDS"."SD_ID" IN ( - SELECT "TBL_ID" FROM "TABLE_PARAMS" WHERE "PARAM_VALUE" LIKE '%KuduStorageHandler%' + SET "INPUT_FORMAT" = 'org.apache.hadoop.hive.kudu.KuduInputFormat', "OUTPUT_FORMAT" = 'org.apache.hadoop.hive.kudu.KuduOutputFormat' + WHERE "SD_ID" IN ( + SELECT "TBLS"."SD_ID" + FROM "TBLS" + INNER JOIN "TABLE_PARAMS" ON "TBLS"."TBL_ID" = "TABLE_PARAMS"."TBL_ID" + WHERE "PARAM_VALUE" LIKE '%KuduStorageHandler%' ); + UPDATE "SERDES" SET "SLIB" = 'org.apache.hadoop.hive.kudu.KuduSerDe' WHERE "SERDE_ID" IN ( SELECT "SDS"."SERDE_ID" FROM "TBLS" - LEFT JOIN "SDS" ON "TBLS"."SD_ID" = "SDS"."SD_ID" - WHERE "TBL_ID" IN (SELECT "TBL_ID" FROM "TABLE_PARAMS" WHERE "PARAM_VALUE" LIKE '%KuduStorageHandler%') + INNER JOIN "SDS" ON "TBLS"."SD_ID" = "SDS"."SD_ID" + INNER JOIN "TABLE_PARAMS" ON "TBLS"."TBL_ID" = "TABLE_PARAMS"."TBL_ID" + WHERE "TABLE_PARAMS"."PARAM_VALUE" LIKE '%KuduStorageHandler%' ); -- These lines need to be last. Insert any changes above. diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.postgres.sql deleted file mode 100644 index 1d285bbe842f..000000000000 --- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-beta-1-to-4.0.0-beta-2.postgres.sql +++ /dev/null @@ -1,9 +0,0 @@ -SELECT 'Upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0-beta-2'; - --- HIVE-24815: Remove "IDXS" Table from Metastore Schema -DROP TABLE IF EXISTS "INDEX_PARAMS"; -DROP TABLE IF EXISTS "IDXS"; - --- These lines need to be last. Insert any changes above. 
-UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0-beta-2', "VERSION_COMMENT"='Hive release version 4.0.0-beta-2' where "VER_ID"=1; -SELECT 'Finished upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0-beta-2'; diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-beta-1-to-4.0.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-beta-1-to-4.0.0.postgres.sql new file mode 100644 index 000000000000..8393ec260cea --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-beta-1-to-4.0.0.postgres.sql @@ -0,0 +1,14 @@ +SELECT 'Upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0'; + +-- HIVE-24815: Remove "IDXS" Table from Metastore Schema +DROP TABLE IF EXISTS "INDEX_PARAMS"; +DROP TABLE IF EXISTS "IDXS"; + +-- HIVE-27827 +ALTER TABLE ONLY "PARTITIONS" DROP CONSTRAINT "UNIQUEPARTITION"; +ALTER TABLE ONLY "PARTITIONS" ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("TBL_ID", "PART_NAME"); +DROP INDEX "PARTITIONS_N49"; + +-- These lines need to be last. Insert any changes above. +UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1; +SELECT 'Finished upgrading MetaStore schema from 4.0.0-beta-1 to 4.0.0'; diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-to-4.1.0.postgres.sql b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-to-4.1.0.postgres.sql new file mode 100644 index 000000000000..13d7dd5b1072 --- /dev/null +++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade-4.0.0-to-4.1.0.postgres.sql @@ -0,0 +1,5 @@ +SELECT 'Upgrading MetaStore schema from 4.0.0 to 4.1.0'; + +-- These lines need to be last. Insert any changes above. 
+UPDATE "VERSION" SET "SCHEMA_VERSION"='4.1.0', "VERSION_COMMENT"='Hive release version 4.1.0' where "VER_ID"=1; +SELECT 'Finished upgrading MetaStore schema from 4.0.0 to 4.1.0'; diff --git a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade.order.postgres b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade.order.postgres index 809b719e7324..58500aa07e6c 100644 --- a/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade.order.postgres +++ b/standalone-metastore/metastore-server/src/main/sql/postgres/upgrade.order.postgres @@ -19,4 +19,5 @@ 3.2.0-to-4.0.0-alpha-1 4.0.0-alpha-1-to-4.0.0-alpha-2 4.0.0-alpha-2-to-4.0.0-beta-1 -4.0.0-beta-1-to-4.0.0-beta-2 +4.0.0-beta-1-to-4.0.0 +4.0.0-to-4.1.0 diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyCustomRDBMS.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyCustomRDBMS.java index 053fbe4b2ec0..444edd8812ff 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyCustomRDBMS.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyCustomRDBMS.java @@ -98,7 +98,7 @@ public boolean supportsGetGeneratedKeys() { return true; } @Override - public boolean isDuplicateKeyError(SQLException ex) { + public boolean isDuplicateKeyError(Throwable t) { return true; } @Override diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index a12b989c7307..53889e2b6f07 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -141,7 +141,7 @@ * * An implementation of RawStore that verifies the DummyJdoConnectionUrlHook has already been * applied when this class's setConf method is called, by checking that the value of the - * METASTORECONNECTURLKEY ConfVar has been updated. + * METASTORE_CONNECT_URL_KEY ConfVar has been updated. * * All non-void methods return default values. 
*/ diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java index 96695ee1eaaf..07b4f25c3bc1 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java @@ -1918,15 +1918,17 @@ public void flushCache() { public List getTableColumnStatistics(String dbName, String tableName, List colNames, String engine) throws NoSuchObjectException, MetaException, TException, InvalidInputException, InvalidObjectException { - return client.get_table_statistics_req( - new TableStatsRequest(dbName, tableName, colNames, engine)).getTableStats(); + TableStatsRequest tsr = new TableStatsRequest(dbName, tableName, colNames); + tsr.setEngine(engine); + return client.get_table_statistics_req(new TableStatsRequest(tsr)).getTableStats(); } @Override public List getTableColumnStatistics( String dbName, String tableName, List colNames, String engine, String validWriteIdList) throws NoSuchObjectException, MetaException, TException { - TableStatsRequest tsr = new TableStatsRequest(dbName, tableName, colNames, engine); + TableStatsRequest tsr = new TableStatsRequest(dbName, tableName, colNames); + tsr.setEngine(engine); tsr.setValidWriteIdList(validWriteIdList); return client.get_table_statistics_req(tsr).getTableStats(); @@ -1937,8 +1939,9 @@ public List getTableColumnStatistics( public Map> getPartitionColumnStatistics( String dbName, String tableName, List partNames, List colNames, String engine) throws NoSuchObjectException, MetaException, TException { - return client.get_partitions_statistics_req( - new PartitionsStatsRequest(dbName, tableName, colNames, partNames, engine)).getPartStats(); + PartitionsStatsRequest psr = new PartitionsStatsRequest(dbName, tableName, colNames, partNames); + psr.setEngine(engine); + return client.get_partitions_statistics_req(new PartitionsStatsRequest(psr)).getPartStats(); } @Override @@ -1946,7 +1949,8 @@ public Map> getPartitionColumnStatistics( String dbName, String tableName, List partNames, List colNames, String engine, String validWriteIdList) throws NoSuchObjectException, MetaException, TException { - PartitionsStatsRequest psr = new PartitionsStatsRequest(dbName, tableName, colNames, partNames, engine); + PartitionsStatsRequest psr = new PartitionsStatsRequest(dbName, tableName, colNames, partNames); + psr.setEngine(engine); psr.setValidWriteIdList(validWriteIdList); return client.get_partitions_statistics_req( psr).getPartStats(); @@ -2945,7 +2949,8 @@ public AggrStats getAggrColStatsFor(String dbName, String tblName, LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate } - PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames, engine); + PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames); + req.setEngine(engine); return client.get_aggr_stats_for(req); } @@ -2958,7 +2963,8 @@ public AggrStats getAggrColStatsFor( LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate } - 
PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partName, engine); + PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partName); + req.setEngine(engine); req.setValidWriteIdList(writeIdList); return client.get_aggr_stats_for(req); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java index 811932f23bae..8e44dc3a46e8 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMarkPartition.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder; import org.apache.hadoop.hive.metastore.client.builder.TableBuilder; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.events.EventCleanerTask; import org.apache.thrift.TException; import org.junit.Assert; import org.junit.Before; @@ -57,6 +58,8 @@ public void setUp() throws Exception { @Test public void testMarkingPartitionSet() throws TException, InterruptedException { + EventCleanerTask cleanerTask = new EventCleanerTask(); + cleanerTask.setConf(conf); HiveMetaStoreClient msc = new HiveMetaStoreClient(conf); final String dbName = "hive2215"; @@ -83,7 +86,8 @@ public void testMarkingPartitionSet() throws TException, InterruptedException { kvs.put("b", "'2011'"); msc.markPartitionForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE); Assert.assertTrue(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE)); - Thread.sleep(10000); + Thread.sleep(3000); + cleanerTask.run(); Assert.assertFalse(msc.isPartitionMarkedForEvent(dbName, tableName, kvs, PartitionEventType.LOAD_DONE)); kvs.put("b", "'2012'"); diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java index 940b18d1db46..1da814dd0927 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -503,7 +503,7 @@ public void testPartitionOpsWhenTableDoesNotExist() throws InvalidObjectExceptio objectStore.alterPartitions(DEFAULT_CATALOG_NAME, DB1, "not_existed_table", part_vals, parts, 0, ""); } catch (MetaException e) { // expected - Assert.assertTrue(e.getCause() instanceof NoSuchObjectException); + Assert.assertEquals(e.getMessage(), "Specified catalog.database.table does not exist : hive.testobjectstoredb1.not_existed_table"); } } @@ -1181,10 +1181,10 @@ public void testConcurrentAddNotifications() throws ExecutionException, Interrup */ // conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver"); -// conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY, +// conf.setVar(HiveConf.ConfVars.METASTORE_CONNECT_URL_KEY, // "jdbc:mysql://localhost:3306/metastore_db"); // conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, ""); -// conf.setVar(HiveConf.ConfVars.METASTOREPWD, ""); +// conf.setVar(HiveConf.ConfVars.METASTORE_PWD, ""); /* we have to add this one manually as for tests the db is initialized via the 
metastoreDiretSQL @@ -1635,40 +1635,44 @@ protected Object getJdoResult(ObjectStore.GetHelper ctx) throws MetaExce @Test public void testNoJdoForUnrecoverableException() throws Exception { - objectStore.openTransaction(); - AtomicBoolean runDirectSql = new AtomicBoolean(false); - AtomicBoolean runJdo = new AtomicBoolean(false); - try { - objectStore.new GetHelper(DEFAULT_CATALOG_NAME, DB1, TABLE1, true, true) { - @Override - protected String describeResult() { - return "test not run jdo for unrecoverable exception"; - } + Exception[] unrecoverableExceptions = new Exception[] { + new SQLIntegrityConstraintViolationException("Unrecoverable ex"), + new DeadlineException("unrecoverable ex")}; + for (Exception unrecoverableException : unrecoverableExceptions) { + objectStore.openTransaction(); + AtomicBoolean runDirectSql = new AtomicBoolean(false); + AtomicBoolean runJdo = new AtomicBoolean(false); + try { + objectStore.new GetHelper(DEFAULT_CATALOG_NAME, DB1, TABLE1, true, true) { + @Override + protected String describeResult() { + return "test not run jdo for unrecoverable exception"; + } - @Override - protected Object getSqlResult(ObjectStore.GetHelper ctx) throws MetaException { - runDirectSql.set(true); - SQLIntegrityConstraintViolationException ex = new SQLIntegrityConstraintViolationException("Unrecoverable ex"); - MetaException me = new MetaException("Throwing unrecoverable exception to test not run jdo."); - me.initCause(ex); - throw me; - } + @Override + protected Object getSqlResult(ObjectStore.GetHelper ctx) throws MetaException { + runDirectSql.set(true); + MetaException me = new MetaException("Throwing unrecoverable exception to test not run jdo."); + me.initCause(unrecoverableException); + throw me; + } - @Override - protected Object getJdoResult(ObjectStore.GetHelper ctx) throws MetaException, NoSuchObjectException { - runJdo.set(true); - SQLIntegrityConstraintViolationException ex = new SQLIntegrityConstraintViolationException("Unrecoverable ex"); - MetaException me = new MetaException("Throwing unrecoverable exception to test not run jdo."); - me.initCause(ex); - throw me; - } - }.run(false); - } catch (MetaException ex) { - // expected + @Override + protected Object getJdoResult(ObjectStore.GetHelper ctx) throws MetaException, NoSuchObjectException { + runJdo.set(true); + SQLIntegrityConstraintViolationException ex = new SQLIntegrityConstraintViolationException("Unrecoverable ex"); + MetaException me = new MetaException("Throwing unrecoverable exception to test not run jdo."); + me.initCause(ex); + throw me; + } + }.run(false); + } catch (MetaException ex) { + // expected + } + objectStore.commitTransaction(); + Assert.assertEquals(true, runDirectSql.get()); + Assert.assertEquals(false, runJdo.get()); } - objectStore.commitTransaction(); - Assert.assertEquals(true, runDirectSql.get()); - Assert.assertEquals(false, runJdo.get()); } /** diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestPartFilterExprUtil.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestPartFilterExprUtil.java index df1666ae5306..f4a7cfd58dac 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestPartFilterExprUtil.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestPartFilterExprUtil.java @@ -104,25 +104,25 @@ public void testMultiColInExpressionWhenDateLiteralTypeIsSpecified() throws Meta @Test public void 
testSingleColInExpressionWhenTimestampLiteralTypeIsNotSpecifiedNorQuoted() throws MetaException { checkFilter("(dt) IN (2000-01-01 01:00:00, 2000-01-01 01:42:00)", - "TreeNode{lhs=LeafNode{keyName='dt', operator='=', value=2000-01-01 01:00:00.0}, andOr='OR', rhs=LeafNode{keyName='dt', operator='=', value=2000-01-01 01:42:00.0}}"); + "TreeNode{lhs=LeafNode{keyName='dt', operator='=', value=2000-01-01 01:00:00}, andOr='OR', rhs=LeafNode{keyName='dt', operator='=', value=2000-01-01 01:42:00}}"); } @Test public void testSingleColInExpressionWhenTimestampLiteralTypeIsSpecified() throws MetaException { checkFilter("(j) IN (TIMESTAMP'2000-01-01 01:00:00', TIMESTAMP'2000-01-01 01:42:00')", - "TreeNode{lhs=LeafNode{keyName='j', operator='=', value=2000-01-01 01:00:00.0}, andOr='OR', rhs=LeafNode{keyName='j', operator='=', value=2000-01-01 01:42:00.0}}"); + "TreeNode{lhs=LeafNode{keyName='j', operator='=', value=2000-01-01 01:00:00}, andOr='OR', rhs=LeafNode{keyName='j', operator='=', value=2000-01-01 01:42:00}}"); } @Test public void testMultiColInExpressionWhenTimestampLiteralTypeIsNotSpecifiedNorQuoted() throws MetaException { checkFilter("(struct(ds1,ds2)) IN (struct(2000-05-08 01:00:00, 2001-04-08 01:00:00), struct(2000-05-09 01:00:00, 2001-04-09 01:00:00))", - "TreeNode{lhs=TreeNode{lhs=LeafNode{keyName='ds1', operator='=', value=2000-05-08 01:00:00.0}, andOr='AND', rhs=LeafNode{keyName='ds2', operator='=', value=2001-04-08 01:00:00.0}}, andOr='OR', rhs=TreeNode{lhs=LeafNode{keyName='ds1', operator='=', value=2000-05-09 01:00:00.0}, andOr='AND', rhs=LeafNode{keyName='ds2', operator='=', value=2001-04-09 01:00:00.0}}}"); + "TreeNode{lhs=TreeNode{lhs=LeafNode{keyName='ds1', operator='=', value=2000-05-08 01:00:00}, andOr='AND', rhs=LeafNode{keyName='ds2', operator='=', value=2001-04-08 01:00:00}}, andOr='OR', rhs=TreeNode{lhs=LeafNode{keyName='ds1', operator='=', value=2000-05-09 01:00:00}, andOr='AND', rhs=LeafNode{keyName='ds2', operator='=', value=2001-04-09 01:00:00}}}"); } @Test public void testMultiColInExpressionWhenTimestampLiteralTypeIsSpecified() throws MetaException { checkFilter("(struct(ds1,ds2)) IN (struct(TIMESTAMP'2000-05-08 01:00:00',TIMESTAMP'2001-04-08 01:00:00'), struct(TIMESTAMP'2000-05-09 01:00:00',TIMESTAMP'2001-04-09 01:00:00'))", - "TreeNode{lhs=TreeNode{lhs=LeafNode{keyName='ds1', operator='=', value=2000-05-08 01:00:00.0}, andOr='AND', rhs=LeafNode{keyName='ds2', operator='=', value=2001-04-08 01:00:00.0}}, andOr='OR', rhs=TreeNode{lhs=LeafNode{keyName='ds1', operator='=', value=2000-05-09 01:00:00.0}, andOr='AND', rhs=LeafNode{keyName='ds2', operator='=', value=2001-04-09 01:00:00.0}}}"); + "TreeNode{lhs=TreeNode{lhs=LeafNode{keyName='ds1', operator='=', value=2000-05-08 01:00:00}, andOr='AND', rhs=LeafNode{keyName='ds2', operator='=', value=2001-04-08 01:00:00}}, andOr='OR', rhs=TreeNode{lhs=LeafNode{keyName='ds1', operator='=', value=2000-05-09 01:00:00}, andOr='AND', rhs=LeafNode{keyName='ds2', operator='=', value=2001-04-09 01:00:00}}}"); } @Test @@ -140,13 +140,13 @@ public void testBetweenExpressionWhenDateLiteralTypeIsSpecified() throws MetaExc @Test public void testBetweenExpressionWhenTimestampLiteralTypeIsNotSpecifiedNorQuoted() throws MetaException { checkFilter("dt BETWEEN 2000-01-01 01:00:00 AND 2000-01-01 01:42:00)", - "TreeNode{lhs=LeafNode{keyName='dt', operator='>=', value=2000-01-01 01:00:00.0}, andOr='AND', rhs=LeafNode{keyName='dt', operator='<=', value=2000-01-01 01:42:00.0}}"); + "TreeNode{lhs=LeafNode{keyName='dt', operator='>=', 
value=2000-01-01 01:00:00}, andOr='AND', rhs=LeafNode{keyName='dt', operator='<=', value=2000-01-01 01:42:00}}"); } @Test public void testBetweenExpressionWhenTimestampLiteralTypeIsSpecified() throws MetaException { checkFilter("dt BETWEEN TIMESTAMP'2000-01-01 01:00:00' AND TIMESTAMP'2000-01-01 01:42:00')", - "TreeNode{lhs=LeafNode{keyName='dt', operator='>=', value=2000-01-01 01:00:00.0}, andOr='AND', rhs=LeafNode{keyName='dt', operator='<=', value=2000-01-01 01:42:00.0}}"); + "TreeNode{lhs=LeafNode{keyName='dt', operator='>=', value=2000-01-01 01:00:00}, andOr='AND', rhs=LeafNode{keyName='dt', operator='<=', value=2000-01-01 01:42:00}}"); } @Test @@ -164,13 +164,13 @@ public void testBinaryExpressionWhenDateLiteralTypeIsSpecified() throws MetaExce @Test public void testBinaryExpressionWhenTimeStampLiteralTypeIsNotSpecifiedNorQuoted() throws MetaException { checkFilter("(j = 1990-11-10 01:00:00 or j = 1990-11-11 01:00:24 and j = 1990-11-12 01:42:00)", - "TreeNode{lhs=LeafNode{keyName='j', operator='=', value=1990-11-10 01:00:00.0}, andOr='OR', rhs=TreeNode{lhs=LeafNode{keyName='j', operator='=', value=1990-11-11 01:00:24.0}, andOr='AND', rhs=LeafNode{keyName='j', operator='=', value=1990-11-12 01:42:00.0}}}"); + "TreeNode{lhs=LeafNode{keyName='j', operator='=', value=1990-11-10 01:00:00}, andOr='OR', rhs=TreeNode{lhs=LeafNode{keyName='j', operator='=', value=1990-11-11 01:00:24}, andOr='AND', rhs=LeafNode{keyName='j', operator='=', value=1990-11-12 01:42:00}}}"); } @Test public void testBinaryExpressionWhenTimeStampLiteralTypeIsSpecified() throws MetaException { checkFilter("(j = TIMESTAMP'1990-11-10 01:00:00' or j = TIMESTAMP'1990-11-11 01:00:24' and j = TIMESTAMP'1990-11-12 01:42:00')", - "TreeNode{lhs=LeafNode{keyName='j', operator='=', value=1990-11-10 01:00:00.0}, andOr='OR', rhs=TreeNode{lhs=LeafNode{keyName='j', operator='=', value=1990-11-11 01:00:24.0}, andOr='AND', rhs=LeafNode{keyName='j', operator='=', value=1990-11-12 01:42:00.0}}}"); + "TreeNode{lhs=LeafNode{keyName='j', operator='=', value=1990-11-10 01:00:00}, andOr='OR', rhs=TreeNode{lhs=LeafNode{keyName='j', operator='=', value=1990-11-11 01:00:24}, andOr='AND', rhs=LeafNode{keyName='j', operator='=', value=1990-11-12 01:42:00}}}"); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetriesInRetryingHMSHandler.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetriesInRetryingHMSHandler.java index 4c4905deb0aa..658677751c7d 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetriesInRetryingHMSHandler.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestRetriesInRetryingHMSHandler.java @@ -85,7 +85,7 @@ public void testNoRetryInit() throws MetaException { /* * If the init method in HMSHandler throws exception all the times it should be retried until - * HiveConf.ConfVars.HMSHANDLERATTEMPTS is reached before giving up + * HiveConf.ConfVars.HMS_HANDLER_ATTEMPTS is reached before giving up */ @Test public void testRetriesLimit() throws MetaException { diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java index 0d38b628abb7..cf4fc482808e 100644 --- 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/client/TestTablesCreateDropAlterTruncate.java @@ -18,7 +18,10 @@ package org.apache.hadoop.hive.metastore.client; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.common.StatsSetupConst; import org.apache.hadoop.hive.common.TableName; import org.apache.hadoop.hive.metastore.ColumnType; @@ -68,6 +71,7 @@ import org.junit.runners.Parameterized; import java.io.File; +import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; @@ -118,6 +122,7 @@ public static void startMetaStores() { extraConf.put("fs.trash.interval", "30"); // FS_TRASH_INTERVAL_KEY (hadoop-2) extraConf.put(ConfVars.HIVE_IN_TEST.getVarname(), "true"); extraConf.put(ConfVars.METASTORE_METADATA_TRANSFORMER_CLASS.getVarname(), " "); + extraConf.put(ConfVars.AUTHORIZATION_STORAGE_AUTH_CHECKS.getVarname(), "true"); startMetaStores(msConf, extraConf); } @@ -1563,6 +1568,41 @@ public void dropTableBogusCatalog() throws TException { client.dropTable("nosuch", testTables[0].getDbName(), testTables[0].getTableName(), true, false); } + @Test(expected = MetaException.class) + public void testDropManagedTableWithoutStoragePermission() throws TException, IOException { + String dbName = testTables[0].getDbName(); + String tblName = testTables[0].getTableName(); + Table table = client.getTable(dbName, tblName); + Path tablePath = new Path(table.getSd().getLocation()); + FileSystem fs = Warehouse.getFs(tablePath, new Configuration()); + fs.setPermission(tablePath.getParent(), new FsPermission((short) 0555)); + + try { + client.dropTable(dbName, tblName); + } finally { + // recover write permission so that file can be cleaned. + fs.setPermission(tablePath.getParent(), new FsPermission((short) 0755)); + } + } + + @Test + public void testDropExternalTableWithoutStoragePermission() throws TException, IOException { + // external table + String dbName = testTables[4].getDbName(); + String tblName = testTables[4].getTableName(); + Table table = client.getTable(dbName, tblName); + Path tablePath = new Path(table.getSd().getLocation()); + FileSystem fs = Warehouse.getFs(tablePath, new Configuration()); + fs.setPermission(tablePath.getParent(), new FsPermission((short) 0555)); + + try { + client.dropTable(dbName, tblName); + } finally { + // recover write permission so that file can be cleaned. + fs.setPermission(tablePath.getParent(), new FsPermission((short) 0755)); + } + } + /** * Creates a Table with all of the parameters set. The temporary table is available only on HS2 * server, so do not use it. 
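[Editor's note, not part of the patch] The new drop-table tests above rely on a simple pattern: flip the table's parent directory to read-only before the metastore call, then restore write permission in a finally block so cleanup still succeeds. A minimal standalone sketch of that pattern against a local Hadoop FileSystem is below; the path and class name are illustrative, not taken from the patch, and the plain delete() stands in for the dropTable call.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import java.io.IOException;

public class ReadOnlyDirSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.getLocal(conf);            // local FS here; the tests use the warehouse FS
    Path tableDir = new Path("/tmp/warehouse/demo_tbl");   // illustrative location
    fs.mkdirs(tableDir);

    // Make the parent read-only so removing tableDir is blocked by the file system.
    fs.setPermission(tableDir.getParent(), new FsPermission((short) 0555));
    try {
      boolean deleted = fs.delete(tableDir, true);         // stands in for client.dropTable(...)
      System.out.println("deleted=" + deleted);            // expected: false (or an IOException below)
    } catch (IOException e) {
      System.out.println("delete blocked: " + e.getMessage());
    } finally {
      // Always restore write permission so the directory can be cleaned up afterwards.
      fs.setPermission(tableDir.getParent(), new FsPermission((short) 0755));
      fs.delete(tableDir, true);
    }
  }
}

Note that, as in the tests, the read-only trick only blocks the deletion when the process does not run as a superuser.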
diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/conf/TestMetastoreConf.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/conf/TestMetastoreConf.java index a5a8145ccbd0..b5208c7fe75a 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/conf/TestMetastoreConf.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/conf/TestMetastoreConf.java @@ -20,7 +20,8 @@ import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest; import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hive.metastore.txn.AcidTxnCleanerService; +import org.apache.hadoop.hive.metastore.txn.service.CompactionHouseKeeperService; +import org.apache.hadoop.hive.metastore.txn.service.AcidTxnCleanerService; import org.hamcrest.CoreMatchers; import org.hamcrest.core.StringContains; import org.hamcrest.core.StringEndsWith; @@ -50,8 +51,8 @@ import org.apache.hadoop.hive.metastore.SerDeStorageSchemaReader; import org.apache.hadoop.hive.metastore.events.EventCleanerTask; import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager; -import org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService; -import org.apache.hadoop.hive.metastore.txn.AcidOpenTxnsCounterService; +import org.apache.hadoop.hive.metastore.txn.service.AcidHouseKeeperService; +import org.apache.hadoop.hive.metastore.txn.service.AcidOpenTxnsCounterService; @Category(MetastoreUnitTest.class) public class TestMetastoreConf { @@ -500,8 +501,10 @@ public void testClassNames() { EventCleanerTask.class.getName()); Assert.assertEquals(MetastoreConf.METASTORE_DELEGATION_MANAGER_CLASS, MetastoreDelegationTokenManager.class.getName()); - Assert.assertEquals(MetastoreConf.ACID_HOUSE_KEEPER_SERVICE_CLASS, + Assert.assertEquals(MetastoreConf.ACID_HOUSEKEEPER_SERVICE_CLASS, AcidHouseKeeperService.class.getName()); + Assert.assertEquals(MetastoreConf.COMPACTION_HOUSEKEEPER_SERVICE_CLASS, + CompactionHouseKeeperService.class.getName()); Assert.assertEquals(MetastoreConf.ACID_TXN_CLEANER_SERVICE_CLASS, AcidTxnCleanerService.class.getName()); Assert.assertEquals(MetastoreConf.ACID_OPEN_TXNS_COUNTER_SERVICE_CLASS, diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/leader/TestLeaderElection.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/leader/TestLeaderElection.java index 99d13c00b4a7..59f7dbc8fd64 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/leader/TestLeaderElection.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/leader/TestLeaderElection.java @@ -27,6 +27,9 @@ import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil; import org.junit.Test; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -52,6 +55,28 @@ public void testConfigLeaderElection() throws Exception { assertFalse(election.isLeader()); } + static class TestLeaderListener implements LeaderElection.LeadershipStateListener { + AtomicBoolean flag; + TestLeaderListener(AtomicBoolean flag) { + this.flag = flag; + } + @Override + public void takeLeadership(LeaderElection election) throws 
Exception { + synchronized (flag) { + flag.set(true); + flag.notifyAll(); + } + } + + @Override + public void lossLeadership(LeaderElection election) throws Exception { + synchronized (flag) { + flag.set(false); + flag.notifyAll(); + } + } + } + @Test public void testLeaseLeaderElection() throws Exception { Configuration configuration = MetastoreConf.newMetastoreConf(); @@ -68,16 +93,7 @@ public void testLeaseLeaderElection() throws Exception { TableName mutex = new TableName("hive", "default", "leader_lease_ms"); LeaseLeaderElection instance1 = new LeaseLeaderElection(); AtomicBoolean flag1 = new AtomicBoolean(false); - instance1.addStateListener(new LeaderElection.LeadershipStateListener() { - @Override - public void takeLeadership(LeaderElection election) { - flag1.set(true); - } - @Override - public void lossLeadership(LeaderElection election) { - flag1.set(false); - } - }); + instance1.addStateListener(new TestLeaderListener(flag1)); instance1.tryBeLeader(configuration, mutex); // elect1 as a leader now assertTrue(flag1.get() && instance1.isLeader()); @@ -85,31 +101,22 @@ public void lossLeadership(LeaderElection election) { configuration.setBoolean(LeaseLeaderElection.METASTORE_RENEW_LEASE, true); LeaseLeaderElection instance2 = new LeaseLeaderElection(); AtomicBoolean flag2 = new AtomicBoolean(false); - instance2.addStateListener(new LeaderElection.LeadershipStateListener() { - @Override - public void takeLeadership(LeaderElection election) { - flag2.set(true); - } - @Override - public void lossLeadership(LeaderElection election) { - flag2.set(false); - } - }); + instance2.addStateListener(new TestLeaderListener(flag2)); instance2.tryBeLeader(configuration, mutex); - // instance2 should not be leader as elect1 holds the lease assertFalse(flag2.get() || instance2.isLeader()); - Thread.sleep(15 * 1000); + + ExecutorService service = Executors.newFixedThreadPool(4); + wait(service, flag1, flag2); // now instance1 lease is timeout, the instance2 should be leader now assertTrue(instance2.isLeader() && flag2.get()); - assertFalse(flag1.get() || instance1.isLeader()); assertTrue(flag2.get() && instance2.isLeader()); + // remove leader's lease (instance2) long lockId2 = instance2.getLockId(); txnStore.unlock(new UnlockRequest(lockId2)); - Thread.sleep(4 * 1000); - assertTrue(flag1.get() && instance1.isLeader()); + wait(service, flag1, flag2); assertFalse(flag2.get() || instance2.isLeader()); assertTrue(lockId2 > 0); assertFalse(instance2.getLockId() == lockId2); @@ -117,7 +124,7 @@ public void lossLeadership(LeaderElection election) { // remove leader's lease(instance1) long lockId1 = instance1.getLockId(); txnStore.unlock(new UnlockRequest(lockId1)); - Thread.sleep(4 * 1000); + wait(service, flag1, flag2); assertFalse(lockId1 == instance1.getLockId()); assertTrue(lockId1 > 0); @@ -128,4 +135,23 @@ public void lossLeadership(LeaderElection election) { } } + private void wait(ExecutorService service, Object... 
obj) throws Exception { + Future[] fs = new Future[obj.length]; + for (int i = 0; i < obj.length; i++) { + Object monitor = obj[i]; + fs[i] = service.submit(() -> { + try { + synchronized (monitor) { + monitor.wait(); + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + } + for (Future f : fs) { + f.get(); + } + } + } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/TestHiveMetaToolCommandLine.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/TestHiveMetaToolCommandLine.java index ab090c9caa16..eb72af0bf534 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/TestHiveMetaToolCommandLine.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/TestHiveMetaToolCommandLine.java @@ -87,7 +87,7 @@ public void testParseUpdateLocation() throws ParseException { @Test public void testNoTask() throws ParseException { exception.expect(IllegalArgumentException.class); - exception.expectMessage("exactly one of -listFSRoot, -executeJDOQL, -updateLocation, -listExtTblLocs, -diffExtTblLocs must be set"); + exception.expectMessage("exactly one of -listFSRoot, -executeJDOQL, -updateLocation, -listExtTblLocs, -diffExtTblLocs, -metadataSummary must be set"); new HiveMetaToolCommandLine(new String[] {}); } @@ -95,7 +95,7 @@ public void testNoTask() throws ParseException { @Test public void testMultipleTask() throws ParseException { exception.expect(IllegalArgumentException.class); - exception.expectMessage("exactly one of -listFSRoot, -executeJDOQL, -updateLocation, -listExtTblLocs, -diffExtTblLocs must be set"); + exception.expectMessage("exactly one of -listFSRoot, -executeJDOQL, -updateLocation, -listExtTblLocs, -diffExtTblLocs, -metadataSummary must be set"); new HiveMetaToolCommandLine(new String[] {"-listFSRoot", "-executeJDOQL", "select a from b"}); } diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolForMetastore.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolForMetastore.java index e82b943915bf..804bf156dc62 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolForMetastore.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolForMetastore.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.dbinstall.rules.DatabaseRule; import org.apache.hadoop.hive.metastore.dbinstall.rules.Derby; +import org.apache.hadoop.hive.metastore.dbinstall.rules.Mariadb; import org.apache.hadoop.hive.metastore.dbinstall.rules.Mssql; import org.apache.hadoop.hive.metastore.dbinstall.rules.Mysql; import org.apache.hadoop.hive.metastore.dbinstall.rules.Oracle; @@ -85,7 +86,7 @@ public static Collection databases() { dbs.add(new Object[] { new Mysql() }); dbs.add(new Object[] { new Oracle() }); dbs.add(new Object[] { new Postgres() }); -// dbs.add(new Object[] { new Mariadb() }); Disabled due to HIVE-27749 + dbs.add(new Object[] { new Mariadb() }); dbs.add(new Object[] { new Mssql() }); return dbs; } diff --git 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/TestAcidTxnCleanerService.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/TestAcidTxnCleanerService.java index 178b68fdfce1..0e27bf393f4f 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/TestAcidTxnCleanerService.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/TestAcidTxnCleanerService.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; import org.apache.hadoop.hive.metastore.api.TxnAbortedException; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; +import org.apache.hadoop.hive.metastore.txn.service.AcidTxnCleanerService; import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil; import org.junit.After; import org.junit.Assert; diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/retry/TestSqlRetryHandler.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/retry/TestSqlRetryHandler.java new file mode 100644 index 000000000000..3948faaaadc4 --- /dev/null +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/txn/retry/TestSqlRetryHandler.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.metastore.txn.retry; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.junit.Assert; +import org.junit.Test; + +import java.sql.SQLException; + +public class TestSqlRetryHandler { + + @Test + public void testRetryableRegex() { + HiveConf conf = new HiveConf(); + SQLException sqlException = new SQLException("ORA-08177: can't serialize access for this transaction", "72000"); + // Note that we have 3 regex'es below + conf.setVar(HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX, "^Deadlock detected, roll back,.*08177.*,.*08178.*"); + boolean result = SqlRetryHandler.isRetryable(conf, sqlException); + Assert.assertTrue("regex should be retryable", result); + + sqlException = new SQLException("This error message, has comma in it"); + conf.setVar(HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX, ".*comma.*"); + result = SqlRetryHandler.isRetryable(conf, sqlException); + Assert.assertTrue("regex should be retryable", result); + } + +} diff --git a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/TestTxnDbUtil.java b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/TestTxnDbUtil.java index 6d7fe31b719d..d369e3d65e68 100644 --- a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/TestTxnDbUtil.java +++ b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/TestTxnDbUtil.java @@ -338,10 +338,24 @@ public static int countQueryAgent(Configuration conf, String countQuery) throws closeResources(conn, stmt, rs); } } + public static String queryToString(Configuration conf, String query) throws Exception { return queryToString(conf, query, true); } - public static String queryToString(Configuration conf, String query, boolean includeHeader) + + public static String queryToString(Configuration conf, String query, boolean includeHeader) throws Exception { + return queryToString(conf, query, includeHeader, " "); + } + + public static String queryToCsv(Configuration conf, String query) throws Exception { + return queryToString(conf, query, true, ","); + } + + public static String queryToCsv(Configuration conf, String query, boolean includeHeader) throws Exception { + return queryToString(conf, query, includeHeader, ","); + } + + public static String queryToString(Configuration conf, String query, boolean includeHeader, String columnSeparator) throws Exception { Connection conn = null; Statement stmt = null; @@ -354,13 +368,13 @@ public static String queryToString(Configuration conf, String query, boolean inc ResultSetMetaData rsmd = rs.getMetaData(); if(includeHeader) { for (int colPos = 1; colPos <= rsmd.getColumnCount(); colPos++) { - sb.append(rsmd.getColumnName(colPos)).append(" "); + sb.append(rsmd.getColumnName(colPos)).append(columnSeparator); } sb.append('\n'); } while(rs.next()) { for (int colPos = 1; colPos <= rsmd.getColumnCount(); colPos++) { - sb.append(rs.getObject(colPos)).append(" "); + sb.append(rs.getObject(colPos)).append(columnSeparator); } sb.append('\n'); } @@ -369,7 +383,7 @@ public static String queryToString(Configuration conf, String query, boolean inc } return sb.toString(); } - + /** * This is only for testing, it does not use the connectionPool from TxnHandler! 
* @param conf diff --git a/standalone-metastore/metastore-server/src/test/resources/sql/postgres/upgrade-3.1.3000-to-4.0.0-beta-2.postgres.sql b/standalone-metastore/metastore-server/src/test/resources/sql/postgres/upgrade-3.1.3000-to-4.1.0.postgres.sql similarity index 95% rename from standalone-metastore/metastore-server/src/test/resources/sql/postgres/upgrade-3.1.3000-to-4.0.0-beta-2.postgres.sql rename to standalone-metastore/metastore-server/src/test/resources/sql/postgres/upgrade-3.1.3000-to-4.1.0.postgres.sql index aec13352cac2..385451ea4824 100644 --- a/standalone-metastore/metastore-server/src/test/resources/sql/postgres/upgrade-3.1.3000-to-4.0.0-beta-2.postgres.sql +++ b/standalone-metastore/metastore-server/src/test/resources/sql/postgres/upgrade-3.1.3000-to-4.1.0.postgres.sql @@ -1,5 +1,5 @@ -- The file has some overlapping with upgrade-3.2.0-to-4.0.0.postgres.sql -SELECT 'Upgrading MetaStore schema from 3.1.3000 to 4.0.0-beta-2'; +SELECT 'Upgrading MetaStore schema from 3.1.3000 to 4.1.0'; -- HIVE-20793 ALTER TABLE "WM_RESOURCEPLAN" ADD "NS" character varying(128); @@ -160,5 +160,5 @@ CREATE TABLE "MIN_HISTORY_WRITE_ID" ( ); -- These lines need to be last. Insert any changes above. -UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0-beta-2', "VERSION_COMMENT"='Hive release version 4.0.0-beta-2' where "VER_ID"=1; -SELECT 'Finished upgrading MetaStore schema from 3.1.3000 to 4.0.0-beta-2'; +UPDATE "VERSION" SET "SCHEMA_VERSION"='4.1.0', "VERSION_COMMENT"='Hive release version 4.1.0' where "VER_ID"=1; +SELECT 'Finished upgrading MetaStore schema from 3.1.3000 to 4.1.0'; diff --git a/standalone-metastore/metastore-tools/metastore-benchmarks/pom.xml b/standalone-metastore/metastore-tools/metastore-benchmarks/pom.xml index 72323540ddbe..e997d31afe04 100644 --- a/standalone-metastore/metastore-tools/metastore-benchmarks/pom.xml +++ b/standalone-metastore/metastore-tools/metastore-benchmarks/pom.xml @@ -16,7 +16,7 @@ hive-metastore-tools org.apache.hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT 4.0.0 jar diff --git a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/BenchmarkTool.java b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/BenchmarkTool.java index 90672bf483db..93556e7f0fa1 100644 --- a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/BenchmarkTool.java +++ b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/BenchmarkTool.java @@ -45,6 +45,7 @@ import java.util.regex.Pattern; import static org.apache.hadoop.hive.metastore.tools.Constants.HMS_DEFAULT_PORT; +import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkAlterPartitions; import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkCreatePartition; import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkCreatePartitions; import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkDeleteCreate; @@ -56,6 +57,7 @@ import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkGetNotificationId; import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkGetPartitionNames; import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkGetPartitions; +import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkGetPartitionsByFilter; import static 
org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkGetPartitionsByName; import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkGetTable; import static org.apache.hadoop.hive.metastore.tools.HMSBenchmarks.benchmarkListAllTables; @@ -283,6 +285,8 @@ private void runNonAcidBenchmarks() { () -> benchmarkGetPartitionNames(bench, bData, 1)) .add("getPartitionsByNames", () -> benchmarkGetPartitionsByName(bench, bData, 1)) + .add("getPartitionsByFilter", + () -> benchmarkGetPartitionsByFilter(bench, bData, 1)) .add("renameTable", () -> benchmarkRenameTable(bench, bData, 1)) .add("dropDatabase", @@ -307,10 +311,14 @@ private void runNonAcidBenchmarks() { () -> benchmarkGetPartitionNames(bench, bData, howMany)) .add("getPartitionsByNames" + '.' + howMany, () -> benchmarkGetPartitionsByName(bench, bData, howMany)) + .add("getPartitionsByFilter" + '.' + howMany, + () -> benchmarkGetPartitionsByFilter(bench, bData, howMany)) .add("addPartitions" + '.' + howMany, () -> benchmarkCreatePartitions(bench, bData, howMany)) .add("dropPartitions" + '.' + howMany, () -> benchmarkDropPartitions(bench, bData, howMany)) + .add("alterPartitions" + '.' + howMany, + () -> benchmarkAlterPartitions(bench, bData, howMany)) .add("renameTable" + '.' + howMany, () -> benchmarkRenameTable(bench, bData, howMany)) .add("dropDatabase" + '.' + howMany, diff --git a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java index 214e9e1cd6bb..fdab07178354 100644 --- a/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java +++ b/standalone-metastore/metastore-tools/metastore-benchmarks/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSBenchmarks.java @@ -23,6 +23,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.PartitionManagementTask; import org.apache.hadoop.hive.metastore.TableType; +import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.thrift.TException; @@ -338,6 +339,35 @@ static DescriptiveStatistics benchmarkDropPartitions(@NotNull MicroBenchmark ben } } + static DescriptiveStatistics benchmarkAlterPartitions(@NotNull MicroBenchmark bench, + @NotNull BenchData data, + int count) { + final HMSClient client = data.getClient(); + String dbName = data.dbName; + String tableName = data.tableName; + + BenchmarkUtils.createPartitionedTable(client, dbName, tableName); + try { + return bench.measure( + () -> addManyPartitionsNoException(client, dbName, tableName, null, + Collections.singletonList("d"), count), + () -> throwingSupplierWrapper(() -> { + List newPartitions = client.getPartitions(dbName, tableName); + newPartitions.forEach(p -> { + p.getParameters().put("new_param", "param_val"); + p.getSd().setCols(Arrays.asList(new FieldSchema("new_col", "string", null))); + }); + client.alterPartitions(dbName, tableName, newPartitions); + return null; + }), + () -> throwingSupplierWrapper(() -> + client.dropPartitions(dbName, tableName, null)) + ); + } finally { + throwingSupplierWrapper(() -> client.dropTable(dbName, tableName)); + } + } + static DescriptiveStatistics benchmarkGetPartitionNames(@NotNull MicroBenchmark bench, @NotNull BenchData data, int count) { @@ 
-380,6 +410,27 @@ static DescriptiveStatistics benchmarkGetPartitionsByName(@NotNull MicroBenchmar } } + static DescriptiveStatistics benchmarkGetPartitionsByFilter(@NotNull MicroBenchmark bench, + @NotNull BenchData data, + int count) { + final HMSClient client = data.getClient(); + String dbName = data.dbName; + String tableName = data.tableName; + + BenchmarkUtils.createPartitionedTable(client, dbName, tableName); + try { + addManyPartitionsNoException(client, dbName, tableName, null, + Collections.singletonList("d"), count); + return bench.measure( + () -> + throwingSupplierWrapper(() -> + client.getPartitionsByFilter(dbName, tableName, "`date`='d0'")) + ); + } finally { + throwingSupplierWrapper(() -> client.dropTable(dbName, tableName)); + } + } + static DescriptiveStatistics benchmarkRenameTable(@NotNull MicroBenchmark bench, @NotNull BenchData data, int count) { diff --git a/standalone-metastore/metastore-tools/pom.xml b/standalone-metastore/metastore-tools/pom.xml index 551647d5c708..546485362546 100644 --- a/standalone-metastore/metastore-tools/pom.xml +++ b/standalone-metastore/metastore-tools/pom.xml @@ -16,7 +16,7 @@ 4.0.0 hive-standalone-metastore - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT org.apache.hive hive-metastore-tools diff --git a/standalone-metastore/metastore-tools/tools-common/pom.xml b/standalone-metastore/metastore-tools/tools-common/pom.xml index 9c253d1950b1..f9919757c9c3 100644 --- a/standalone-metastore/metastore-tools/tools-common/pom.xml +++ b/standalone-metastore/metastore-tools/tools-common/pom.xml @@ -16,7 +16,7 @@ hive-metastore-tools org.apache.hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT 4.0.0 jar diff --git a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java index 6b7e2a450b18..61580aa7b225 100644 --- a/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java +++ b/standalone-metastore/metastore-tools/tools-common/src/main/java/org/apache/hadoop/hive/metastore/tools/HMSClient.java @@ -329,6 +329,11 @@ List getPartitionsByNames(@NotNull String dbName, @NotNull String tab return client.get_partitions_by_names(dbName, tableName, names); } + List getPartitionsByFilter(@NotNull String dbName, @NotNull String tableName, + @NotNull String filter) throws TException { + return client.get_partitions_by_filter(dbName, tableName, filter, (short) -1); + } + boolean alterTable(@NotNull String dbName, @NotNull String tableName, @NotNull Table newTable) throws TException { client.alter_table(dbName, tableName, newTable); diff --git a/standalone-metastore/pom.xml b/standalone-metastore/pom.xml index caba6ea7f230..49c4c393f55c 100644 --- a/standalone-metastore/pom.xml +++ b/standalone-metastore/pom.xml @@ -21,7 +21,7 @@ org.apache.hive hive-standalone-metastore - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT pom Hive Standalone Metastore @@ -30,8 +30,8 @@ metastore-tools - 4.0.0-beta-2-SNAPSHOT - 4.0.0-beta-2 + 4.1.0-SNAPSHOT + 4.1.0 . 
UTF-8 @@ -79,7 +79,7 @@ 22.0 3.3.6 4.0.3 - 2.13.5 + 2.16.1 3.3 5.5.1 4.13.2 @@ -90,16 +90,16 @@ 2.18.0 3.3.3 1.8.5 - 3.21.7 + 3.24.4 1.51.0 1.9.0 2.14.6 4.0.4 - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT 1.9.4 1.3 5.2.0 - 3.7.2 + 3.8.3 9.1.6 4.0.3 2.8.4 @@ -108,9 +108,10 @@ 4.5.13 4.5.5 9.31 - 9.4.40.v20210413 + 9.4.45.v20220203 1.3.2 - 5.2.24.RELEASE + + 5.3.21 you-must-set-this-to-run-thrift ${basedir}/src/gen/thrift diff --git a/storage-api/pom.xml b/storage-api/pom.xml index 5310a980b452..34315e7f861e 100644 --- a/storage-api/pom.xml +++ b/storage-api/pom.xml @@ -21,7 +21,7 @@ org.apache.hive hive-storage-api - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT jar Hive Storage API diff --git a/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringExpr.java b/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringExpr.java index b6d3184ffedb..34097167ac13 100644 --- a/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringExpr.java +++ b/storage-api/src/java/org/apache/hadoop/hive/ql/exec/vector/expressions/StringExpr.java @@ -342,7 +342,15 @@ public int find(byte[] input, int start, int len) { } s_tmp--; } - next += shift[input[next] & MAX_BYTE]; + + // if the character string contains control characters, + // overflow occurs. + int shiftIndex = input[next] & MAX_BYTE; + if (shiftIndex >= MAX_BYTE) { + next++; + } else { + next += shift[shiftIndex]; + } } return -1; } diff --git a/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestStringExpr.java b/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestStringExpr.java index 6fb66115277d..483eb68b9795 100644 --- a/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestStringExpr.java +++ b/storage-api/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestStringExpr.java @@ -20,9 +20,11 @@ import org.junit.Test; -import java.nio.charset.StandardCharsets; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.*; +import java.io.ByteArrayOutputStream; +import java.nio.charset.StandardCharsets; public class TestStringExpr { @Test @@ -49,6 +51,24 @@ public void test() throws Exception { assertEquals("Testing match at end of string", 24, find(pattern, input4)); } + @Test + public void testControlCharacters() throws Exception { + StringExpr.Finder pattern = compile("pattern"); + assertNotNull(pattern); + + byte b = -1; + byte[] controlBytes1 = "abcedf".getBytes(StandardCharsets.UTF_8); + byte[] controlBytes2 = "pattern".getBytes(StandardCharsets.UTF_8); + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + outputStream.write(controlBytes1); + outputStream.write(b); + outputStream.write(controlBytes2); + byte[] controlChar = outputStream.toByteArray(); + outputStream.close(); + + assertEquals("Testing valid match", 7, pattern.find(controlChar, 0, controlChar.length)); + } + private StringExpr.Finder compile(String pattern) { return StringExpr.compile(pattern.getBytes(StandardCharsets.UTF_8)); } diff --git a/streaming/pom.xml b/streaming/pom.xml index 456e6f069e8d..200d7f44f3a3 100644 --- a/streaming/pom.xml +++ b/streaming/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-streaming @@ -129,7 +129,7 @@ org.apache.hive hive-standalone-metastore-server - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT tests test diff --git a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java 
b/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java index 6d78e70cffbb..73accd90b034 100644 --- a/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java +++ b/streaming/src/java/org/apache/hive/streaming/AbstractRecordWriter.java @@ -147,7 +147,7 @@ public void init(StreamingConnection conn, long minWriteId, long maxWriteId, this.curBatchMaxWriteId = maxWriteId; this.statementId = statementId; this.conf = conn.getHiveConf(); - this.defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME); + this.defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME); this.table = conn.getTable(); String location = table.getSd().getLocation(); try { diff --git a/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java b/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java index a61beb55b8fd..41e356b93da3 100644 --- a/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java +++ b/streaming/src/java/org/apache/hive/streaming/HiveStreamingConnection.java @@ -827,7 +827,7 @@ private void overrideConfSettings(HiveConf conf) { setHiveConf(conf, HiveConf.ConfVars.HIVE_TXN_MANAGER, DbTxnManager.class.getName()); setHiveConf(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true); setHiveConf(conf, MetastoreConf.ConfVars.EXECUTE_SET_UGI.getHiveName()); - setHiveConf(conf, HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); + setHiveConf(conf, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); if (streamingOptimizations) { setHiveConf(conf, HiveConf.ConfVars.HIVE_ORC_DELTA_STREAMING_OPTIMIZATIONS_ENABLED, true); } diff --git a/streaming/src/test/org/apache/hive/streaming/TestStreaming.java b/streaming/src/test/org/apache/hive/streaming/TestStreaming.java index 37c64429b78d..5ee780005a65 100644 --- a/streaming/src/test/org/apache/hive/streaming/TestStreaming.java +++ b/streaming/src/test/org/apache/hive/streaming/TestStreaming.java @@ -73,7 +73,7 @@ import org.apache.hadoop.hive.metastore.api.TxnInfo; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.metastore.conf.MetastoreConf; -import org.apache.hadoop.hive.metastore.txn.AcidHouseKeeperService; +import org.apache.hadoop.hive.metastore.txn.service.AcidHouseKeeperService; import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils; import org.apache.hadoop.hive.metastore.utils.TestTxnDbUtil; import org.apache.hadoop.hive.metastore.txn.TxnStore; @@ -118,6 +118,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +@Ignore("HIVE-24219") public class TestStreaming { private static final Logger LOG = LoggerFactory.getLogger(TestStreaming.class); @@ -1365,12 +1366,15 @@ public void testTimeOutReaper() throws Exception { .withRecordWriter(writer) .withHiveConf(conf) .connect(); - - connection.beginTransaction(); + + HiveConf houseKeeperConf = new HiveConf(conf); //ensure txn timesout - conf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 2, TimeUnit.MILLISECONDS); + houseKeeperConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 100, TimeUnit.MILLISECONDS); AcidHouseKeeperService houseKeeperService = new AcidHouseKeeperService(); - houseKeeperService.setConf(conf); + houseKeeperService.setConf(houseKeeperConf); + + connection.beginTransaction(); + Thread.sleep(150); houseKeeperService.run(); try { //should fail because the TransactionBatch timed out @@ -1389,6 +1393,7 @@ public void testTimeOutReaper() throws Exception { connection.beginTransaction(); 
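Stepping back from the hunk for a moment: the testTimeOutReaper changes above copy the connection's HiveConf before shortening HIVE_TXN_TIMEOUT, so only the AcidHouseKeeperService sees the 100 ms limit while the streaming connection keeps its normal timeout. A condensed sketch of that pattern follows, using only calls that appear in the hunk; the class and method names here are illustrative, not part of the patch.

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.txn.service.AcidHouseKeeperService;

final class TxnTimeoutReaperSketch {
  // Copy the conf before shortening the timeout so the connection under test
  // keeps its normal HIVE_TXN_TIMEOUT; only the reaper sees the 100 ms value.
  static AcidHouseKeeperService shortTimeoutReaper(HiveConf connectionConf) {
    HiveConf houseKeeperConf = new HiveConf(connectionConf);
    houseKeeperConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 100, TimeUnit.MILLISECONDS);
    AcidHouseKeeperService reaper = new AcidHouseKeeperService();
    reaper.setConf(houseKeeperConf);
    return reaper;
  }
}

Running the returned service after sleeping past the timeout is what makes the open transaction eligible for abort, which is what the two Thread.sleep(150) additions in this hunk arrange.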
connection.commitTransaction(); connection.beginTransaction(); + Thread.sleep(150); houseKeeperService.run(); try { //should fail because the TransactionBatch timed out diff --git a/streaming/src/test/org/apache/hive/streaming/TestStreamingDynamicPartitioning.java b/streaming/src/test/org/apache/hive/streaming/TestStreamingDynamicPartitioning.java index c548ea7388a6..c8c7a8e26db0 100644 --- a/streaming/src/test/org/apache/hive/streaming/TestStreamingDynamicPartitioning.java +++ b/streaming/src/test/org/apache/hive/streaming/TestStreamingDynamicPartitioning.java @@ -513,7 +513,7 @@ public void testDPTwoLevelNonStringPartitionColumns() throws Exception { assertEquals("7\tfoo\t" + defaultPartitionName + "\t" + defaultPartitionName, res.get(6)); assertEquals("8\tbar\t" + defaultPartitionName + "\t12", res.get(7)); - defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME); + defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME); res = queryTable(driver, "show partitions " + (dbName + "." + tblName)); assertEquals(5, res.size()); assertTrue(res.contains("year=2018/month=2")); @@ -573,7 +573,7 @@ public void testWriteBeforeBegin() throws Exception { assertTrue(exception.getMessage().equals("Transaction state is not OPEN. Missing beginTransaction?")); connection.close(); - String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME); List res = queryTable(driver, "select * from " + (dbName + "." + tblName) + " order by id"); assertEquals(2, res.size()); assertEquals("1\tfoo\tAsia\t" + defaultPartitionName, res.get(0)); @@ -707,7 +707,7 @@ public void testWriteAfterClose() throws Exception { assertNotNull(exception); assertTrue(exception.getMessage().endsWith("Streaming connection is closed already.")); - String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME); List res = queryTable(driver, "select * from " + (dbName + "." + tblName) + " order by id"); assertEquals(2, res.size()); assertEquals("1\tfoo\tAsia\t" + defaultPartitionName, res.get(0)); @@ -758,7 +758,7 @@ public void testWriteAfterAbort() throws Exception { } assertNotNull(exception); assertTrue(exception.getMessage().equals("Streaming connection is closed already.")); - String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULTPARTITIONNAME); + String defaultPartitionName = conf.getVar(HiveConf.ConfVars.DEFAULT_PARTITION_NAME); List res = queryTable(driver, "select * from " + (dbName + "." 
+ tblName) + " order by id"); assertEquals(3, res.size()); assertEquals("1\tfoo\tAsia\t" + defaultPartitionName, res.get(0)); diff --git a/testutils/pom.xml b/testutils/pom.xml index 316287e51e97..0d62f283e872 100644 --- a/testutils/pom.xml +++ b/testutils/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-testutils diff --git a/udf/pom.xml b/udf/pom.xml index 255feca1515c..6929cd74cfcb 100644 --- a/udf/pom.xml +++ b/udf/pom.xml @@ -17,7 +17,7 @@ org.apache.hive hive - 4.0.0-beta-2-SNAPSHOT + 4.1.0-SNAPSHOT ../pom.xml hive-udf diff --git a/upgrade-acid/pom.xml b/upgrade-acid/pom.xml deleted file mode 100644 index d3fb9b28545f..000000000000 --- a/upgrade-acid/pom.xml +++ /dev/null @@ -1,68 +0,0 @@ - - - - 4.0.0 - - - org.apache - apache - 23 - - - org.apache.hive - hive-upgrade-acid - 4.0.0-beta-2-SNAPSHOT - Hive Upgrade Acid - pom - - - UTF-8 - 1.8 - 1.8 - false - ${settings.localRepository} - .. - - 1.0b3 - ${basedir}/${hive.path.to.root}/checkstyle - 2.17 - 5.6.2 - 5.6.2 - 2.16.0 - 3.0.0-M4 - - - pre-upgrade - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - ${maven.checkstyle.plugin.version} - - ${checkstyle.conf.dir}/checkstyle.xml - config_loc=${checkstyle.conf.dir} - true - - - - org.codehaus.mojo - versions-maven-plugin - ${maven.versions.plugin.version} - - - - diff --git a/upgrade-acid/pre-upgrade/pom.xml b/upgrade-acid/pre-upgrade/pom.xml deleted file mode 100644 index 989205bd80aa..000000000000 --- a/upgrade-acid/pre-upgrade/pom.xml +++ /dev/null @@ -1,360 +0,0 @@ - - - - - org.apache.hive - hive-upgrade-acid - 4.0.0-beta-2-SNAPSHOT - ../pom.xml - - 4.0.0 - - hive-pre-upgrade - Hive Pre Upgrade Acid - jar - - ../.. - - ${project.build.directory}/testconf - file:// - ${project.basedir}/src/test/resources - ${project.build.directory}/tmp - ${project.build.directory}/warehouse - file:// - 1 - true - 2.3.3 - 2.7.2 - - - - - commons-cli - commons-cli - 1.2 - provided - - - org.apache.hive - hive-metastore - ${hdp.hive.version} - provided - - - tomcat - jasper-compiler - - - tomcat - jasper-runtime - - - org.apache.zookeeper - zookeeper - - - org.apache.curator - curator-framework - - - jdk.tools - jdk.tools - - - - - org.apache.hive - hive-exec - ${hdp.hive.version} - provided - - - org.codehaus.groovy - groovy-all - - - org.apache.zookeeper - zookeeper - - - org.apache.curator - curator-framework - - - org.pentaho - pentaho-aggdesigner-algorithm - - - - - org.apache.hadoop - hadoop-common - ${hdp.hadoop.version} - provided - - - commons-beanutils - commons-beanutils - - - org.apache.zookeeper - zookeeper - - - org.apache.curator - curator-client - - - org.apache.curator - curator-recipes - - - org.apache.curator - curator-framework - - - - - - org.apache.hadoop - hadoop-mapreduce-client-common - 2.7.2 - provided - - - org.apache.zookeeper - zookeeper - - - io.netty - netty - - - - - org.apache.orc - orc-core - 1.3.3 - provided - - - org.junit.jupiter - junit-jupiter-engine - ${junit.jupiter.version} - test - - - org.junit.vintage - junit-vintage-engine - ${junit.vintage.version} - test - - - - - - ${basedir}/src/main/resources - - package.jdo - - - - - - - org.apache.maven.plugins - maven-antrun-plugin - - - ant-contrib - ant-contrib - ${ant.contrib.version} - - - ant - ant - - - - - - - org.apache.maven.plugins - maven-checkstyle-plugin - ${maven.checkstyle.plugin.version} - - - org.codehaus.mojo - exec-maven-plugin - ${maven.exec.plugin.version} - - - org.apache.maven.plugins - maven-surefire-plugin - 
${maven.surefire.plugin.version} - - - - - - - org.apache.maven.plugins - maven-antrun-plugin - - - setup-test-dirs - process-test-resources - - run - - - - - - - - - - - - - - - - - - setup-metastore-scripts - process-test-resources - - run - - - - - - - - - - - - - - - - - - org.apache.maven.plugins - maven-failsafe-plugin - - - - integration-test - verify - - - - - true - false - -Xmx2048m - false - - true - ${test.tmp.dir} - ${test.tmp.dir} - true - - - ${log4j.conf.dir} - - ${skipITests} - - - - - org.apache.maven.plugins - maven-surefire-plugin - - true - false - ${test.forkcount} - -Xmx2048m - false - - ${project.build.directory} - true - ${derby.version} - ${test.tmp.dir}/derby.log - - ${test.log4j.scheme}${test.conf.dir}/hive-log4j2.properties - true - ${test.tmp.dir} - - jdbc:derby:${test.tmp.dir}/junit_metastore_db;create=true - false - ${test.tmp.dir} - ${test.warehouse.scheme}${test.warehouse.dir} - - - - ${log4j.conf.dir} - ${test.conf.dir} - - ${test.conf.dir}/conf - - - - - org.apache.maven.plugins - maven-jar-plugin - - - - test-jar - - - - - - - diff --git a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CloseableThreadLocal.java b/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CloseableThreadLocal.java deleted file mode 100644 index fbe0a80d4883..000000000000 --- a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CloseableThreadLocal.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.upgrade.acid; - -import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Consumer; -import java.util.function.Supplier; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class has similar functionality as java.lang.ThreadLocal. - * Plus it provides a close function to clean up unmanaged resources in all threads where the resource was initialized. 
- * @param - type of resource - */ -public class CloseableThreadLocal { - - private static final Logger LOG = LoggerFactory.getLogger(CloseableThreadLocal.class); - - private final ConcurrentHashMap threadLocalMap; - private final Supplier initialValue; - private final Consumer closeFunction; - - public CloseableThreadLocal(Supplier initialValue, Consumer closeFunction, int poolSize) { - this.initialValue = initialValue; - threadLocalMap = new ConcurrentHashMap<>(poolSize); - this.closeFunction = closeFunction; - } - - public T get() { - return threadLocalMap.computeIfAbsent(Thread.currentThread(), thread -> initialValue.get()); - } - - public void close() { - threadLocalMap.values().forEach(this::closeQuietly); - } - - private void closeQuietly(T resource) { - try { - closeFunction.accept(resource); - } catch (Exception e) { - LOG.warn("Error while closing resource.", e); - } - } -} diff --git a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CompactTablesState.java b/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CompactTablesState.java deleted file mode 100644 index beb934c83eb8..000000000000 --- a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CompactTablesState.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.upgrade.acid; - -import static java.util.Collections.emptyList; - -import java.util.ArrayList; -import java.util.List; - -/** - * Store result of database and table scan: compaction commands and meta info. 
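For context on the CloseableThreadLocal class removed above: it was a small per-thread resource holder whose close() tears down every thread's instance at once, and PreUpgradeTool used it to keep one metastore client per worker thread. A brief usage sketch of that pattern, assuming the class as defined above (generic in its resource type); the PrintingResource stand-in is hypothetical and exists only to make the lifecycle visible.

import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.hive.upgrade.acid.CloseableThreadLocal;

final class CloseableThreadLocalUsageSketch {
  // Hypothetical resource, only here to show when instances are created and closed.
  static final class PrintingResource implements AutoCloseable {
    PrintingResource() { System.out.println("opened in " + Thread.currentThread().getName()); }
    @Override public void close() { System.out.println("closed " + this); }
  }

  public static void main(String[] args) {
    CloseableThreadLocal<PrintingResource> perThread =
        new CloseableThreadLocal<>(PrintingResource::new, PrintingResource::close, 2);
    perThread.get();                                   // lazily creates this thread's instance
    CompletableFuture.runAsync(perThread::get).join(); // a separate instance for a pool thread
    perThread.close();                                 // closes every thread's instance in one call
  }
}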
- */ -public final class CompactTablesState { - - public static CompactTablesState empty() { - return new CompactTablesState(emptyList(), new CompactionMetaInfo()); - } - - public static CompactTablesState compactions(List compactionCommands, CompactionMetaInfo compactionMetaInfo) { - return new CompactTablesState(compactionCommands, compactionMetaInfo); - } - - private final List compactionCommands; - private final CompactionMetaInfo compactionMetaInfo; - - private CompactTablesState(List compactionCommands, CompactionMetaInfo compactionMetaInfo) { - this.compactionCommands = compactionCommands; - this.compactionMetaInfo = compactionMetaInfo; - } - - public List getCompactionCommands() { - return compactionCommands; - } - - public CompactionMetaInfo getMetaInfo() { - return compactionMetaInfo; - } - - public CompactTablesState merge(CompactTablesState other) { - List compactionCommands = new ArrayList<>(this.compactionCommands); - compactionCommands.addAll(other.compactionCommands); - return new CompactTablesState(compactionCommands, this.compactionMetaInfo.merge(other.compactionMetaInfo)); - } -} diff --git a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CompactionMetaInfo.java b/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CompactionMetaInfo.java deleted file mode 100644 index 72b4ec63a9e0..000000000000 --- a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/CompactionMetaInfo.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.upgrade.acid; - -import java.util.HashSet; -import java.util.Set; - -/** - * Store result of compaction calls. - */ -public class CompactionMetaInfo { - /** - * total number of bytes to be compacted across all compaction commands. - */ - private long numberOfBytes; - /** - * IDs of compactions launched by this utility. 
- */ - private final Set compactionIds; - - public CompactionMetaInfo() { - compactionIds = new HashSet<>(); - numberOfBytes = 0; - } - - private CompactionMetaInfo(Set initialCompactionIds, long initialNumberOfBytes) { - this.compactionIds = new HashSet<>(initialCompactionIds); - numberOfBytes = initialNumberOfBytes; - } - - public CompactionMetaInfo merge(CompactionMetaInfo other) { - CompactionMetaInfo result = new CompactionMetaInfo(this.compactionIds, this.numberOfBytes); - result.numberOfBytes += other.numberOfBytes; - result.compactionIds.addAll(other.compactionIds); - return result; - } - - public long getNumberOfBytes() { - return numberOfBytes; - } - - public void addBytes(long bytes) { - numberOfBytes += bytes; - } - - public Set getCompactionIds() { - return compactionIds; - } - - public void addCompactionId(long compactionId) { - compactionIds.add(compactionId); - } -} diff --git a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/PreUpgradeTool.java b/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/PreUpgradeTool.java deleted file mode 100644 index b72b236b4677..000000000000 --- a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/PreUpgradeTool.java +++ /dev/null @@ -1,750 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hive.upgrade.acid; - -import static org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.escapeSQLString; - -import java.io.FileWriter; -import java.io.IOException; -import java.io.PrintWriter; -import java.nio.ByteBuffer; -import java.nio.charset.CharacterCodingException; -import java.nio.charset.Charset; -import java.nio.charset.CharsetDecoder; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.concurrent.ForkJoinPool; -import java.util.stream.Collectors; - -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.CommandLineParser; -import org.apache.commons.cli.GnuParser; -import org.apache.commons.cli.HelpFormatter; -import org.apache.commons.cli.Option; -import org.apache.commons.cli.Options; -import org.apache.commons.cli.ParseException; -import org.apache.commons.lang3.exception.ExceptionUtils; -import org.apache.hadoop.fs.ContentSummary; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.PathFilter; -import org.apache.hadoop.hive.common.ValidTxnList; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.HiveMetaHook; -import org.apache.hadoop.hive.metastore.HiveMetaHookLoader; -import org.apache.hadoop.hive.metastore.HiveMetaStoreClient; -import org.apache.hadoop.hive.metastore.IMetaStoreClient; -import org.apache.hadoop.hive.metastore.RetryingMetaStoreClient; -import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.CompactionResponse; -import org.apache.hadoop.hive.metastore.api.CompactionType; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; -import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; -import org.apache.hadoop.hive.metastore.txn.TxnStore; -import org.apache.hadoop.hive.metastore.txn.TxnUtils; -import org.apache.hadoop.hive.ql.io.AcidUtils; -import org.apache.hadoop.hive.ql.io.orc.OrcFile; -import org.apache.hadoop.hive.ql.io.orc.Reader; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.session.SessionState; -import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.shims.HadoopShims; -import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hive.common.util.HiveVersionInfo; -import org.apache.thrift.TException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.google.common.annotations.VisibleForTesting; - -/** - * This utility is designed to help with upgrading Hive 2.x to Hive 3.0. On-disk layout for - * transactional tables has changed in 3.0 and require pre-processing before upgrade to ensure - * they are readable by Hive 3.0. Some transactional tables (identified by this utility) require - * Major compaction to be run on them before upgrading to 3.0. Once this compaction starts, no - * more update/delete/merge statements may be executed on these tables until upgrade is finished. 
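To make the compaction step concrete: for every full-ACID table or partition it flags, the tool shown below either writes an ALTER TABLE ... COMPACT 'major' statement into a script or, with -execute, schedules the compaction directly through the metastore client and records the returned id so it can wait for exactly the compactions it started. A hedged sketch of that direct-scheduling call, reduced to its essentials; the client parameter and names are placeholders, and the call shape follows the tool's scheduleCompaction method.

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.CompactionResponse;
import org.apache.hadoop.hive.metastore.api.CompactionType;
import org.apache.thrift.TException;

final class ScheduleMajorCompactionSketch {
  // compact2() returns a CompactionResponse whose id identifies the enqueued
  // compaction; pass null for partName on unpartitioned tables.
  static long scheduleMajor(IMetaStoreClient client, String db, String table, String partName)
      throws TException {
    CompactionResponse resp = client.compact2(db, table, partName, CompactionType.MAJOR, null);
    if (!resp.isAccepted()) {
      System.out.println(db + "." + table + " is already being compacted, id=" + resp.getId());
    }
    return resp.getId();
  }
}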
- * - * Additionally, a new type of transactional tables was added in 3.0 - insert-only tables. These - * tables support ACID semantics and work with any Input/OutputFormat. Any Managed tables may - * be made insert-only transactional table. These tables don't support Update/Delete/Merge commands. - * - * Note that depending on the number of tables/partitions and amount of data in them compactions - * may take a significant amount of time and resources. The script output by this utility includes - * some heuristics that may help estimate the time required. If no script is produced, no action - * is needed. For compactions to run an instance of standalone Hive Metastore must be running. - * Please make sure hive.compactor.worker.threads is sufficiently high - this specifies the limit - * of concurrent compactions that may be run. Each compaction job is a Map-Reduce job. - * hive.compactor.job.queue may be used to set a Yarn queue ame where all compaction jobs will be - * submitted. - * - * "execute" option may be supplied to have the utility automatically execute the - * equivalent of the generated commands - * - * "location" option may be supplied followed by a path to set the location for the generated - * scripts. - * - * Random: - * This utility connects to the Metastore via API. It may be necessary to set - * -Djavax.security.auth.useSubjectCredsOnly=false in Kerberized environment if errors like - * "org.ietf.jgss.GSSException: No valid credentials provided ( - * Mechanism level: Failed to find any Kerberos tgt)" - * show up after kinit. - * - * See also org.apache.hadoop.hive.ql.util.UpgradeTool in Hive 3.x - */ -public class PreUpgradeTool implements AutoCloseable { - private static final Logger LOG = LoggerFactory.getLogger(PreUpgradeTool.class); - private static final int PARTITION_BATCH_SIZE = 10000; - - public static void main(String[] args) throws Exception { - Options cmdLineOptions = createCommandLineOptions(); - CommandLineParser parser = new GnuParser(); - CommandLine line; - try { - line = parser.parse(cmdLineOptions, args); - } catch (ParseException e) { - System.err.println("PreUpgradeTool: Parsing failed. Reason: " + e.getLocalizedMessage()); - printAndExit(cmdLineOptions); - return; - } - if (line.hasOption("help")) { - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("upgrade-acid", cmdLineOptions); - return; - } - RunOptions runOptions = RunOptions.fromCommandLine(line); - LOG.info("Starting with " + runOptions.toString()); - - try { - String hiveVer = HiveVersionInfo.getShortVersion(); - LOG.info("Using Hive Version: " + HiveVersionInfo.getVersion() + " build: " + - HiveVersionInfo.getBuildVersion()); - if(!hiveVer.startsWith("2.")) { - throw new IllegalStateException("preUpgrade requires Hive 2.x. Actual: " + hiveVer); - } - try (PreUpgradeTool tool = new PreUpgradeTool(runOptions)) { - tool.prepareAcidUpgradeInternal(); - } - } catch(Exception ex) { - LOG.error("PreUpgradeTool failed", ex); - throw ex; - } - } - - private final HiveConf conf; - private final CloseableThreadLocal metaStoreClient; - private final ThreadLocal txns; - private final RunOptions runOptions; - - public PreUpgradeTool(RunOptions runOptions) { - this.runOptions = runOptions; - this.conf = hiveConf != null ? hiveConf : new HiveConf(); - this.metaStoreClient = new CloseableThreadLocal<>(this::getHMS, IMetaStoreClient::close, - runOptions.getTablePoolSize()); - this.txns = ThreadLocal.withInitial(() -> { - /* - This API changed from 2.x to 3.0. 
so this won't even compile with 3.0 - but it doesn't need to since we only run this preUpgrade - */ - try { - TxnStore txnHandler = TxnUtils.getTxnStore(conf); - return TxnUtils.createValidCompactTxnList(txnHandler.getOpenTxnsInfo()); - } catch (MetaException e) { - throw new RuntimeException(e); - } - }); - } - - private static void printAndExit(Options cmdLineOptions) { - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("upgrade-acid", cmdLineOptions); - System.exit(1); - } - - static Options createCommandLineOptions() { - try { - Options cmdLineOptions = new Options(); - cmdLineOptions.addOption(new Option("help", "Generates a script to execute on 2.x" + - " cluster. This requires 2.x binaries on the classpath and hive-site.xml.")); - Option exec = new Option("execute", - "Executes commands equivalent to generated scrips"); - exec.setOptionalArg(true); - cmdLineOptions.addOption(exec); - Option locationOption = new Option("location", true, - "Location to write scripts to. Default is CWD."); - locationOption.setArgName("path of directory"); - cmdLineOptions.addOption(locationOption); - - Option dbRegexOption = new Option("d", - "Regular expression to match database names on which this tool will be run. Default: all databases"); - dbRegexOption.setLongOpt("dbRegex"); - dbRegexOption.setArgs(1); - dbRegexOption.setArgName("regex"); - cmdLineOptions.addOption(dbRegexOption); - - Option tableRegexOption = new Option("t", - "Regular expression to match table names on which this tool will be run. Default: all tables"); - tableRegexOption.setLongOpt("tableRegex"); - tableRegexOption.setArgs(1); - tableRegexOption.setArgName("regex"); - cmdLineOptions.addOption(tableRegexOption); - - Option tableTypeOption = new Option("tt", - String.format("Table type to match tables on which this tool will be run. Possible values: %s " + - "Default: all tables", - Arrays.stream(TableType.values()).map(Enum::name).collect(Collectors.joining("|")))); - tableTypeOption.setLongOpt("tableType"); - tableTypeOption.setArgs(1); - tableTypeOption.setArgName("table type"); - cmdLineOptions.addOption(tableTypeOption); - - Option tablePoolSizeOption = new Option("tn", "Number of threads to process tables."); - tablePoolSizeOption.setLongOpt("tablePoolSize"); - tablePoolSizeOption.setArgs(1); - tablePoolSizeOption.setArgName("pool size"); - cmdLineOptions.addOption(tablePoolSizeOption); - - return cmdLineOptions; - } catch(Exception ex) { - LOG.error("init()", ex); - throw ex; - } - } - - private static HiveMetaHookLoader getHookLoader() { - return new HiveMetaHookLoader() { - @Override - public HiveMetaHook getHook( - org.apache.hadoop.hive.metastore.api.Table tbl) { - return null; - } - }; - } - - public IMetaStoreClient getHMS() { - UserGroupInformation loggedInUser = null; - try { - loggedInUser = UserGroupInformation.getLoginUser(); - } catch (IOException e) { - LOG.warn("Unable to get logged in user via UGI. err: {}", e.getMessage()); - } - boolean secureMode = loggedInUser != null && loggedInUser.hasKerberosCredentials(); - if (secureMode) { - conf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, true); - } - try { - LOG.info("Creating metastore client for {}", "PreUpgradeTool"); - /* I'd rather call return RetryingMetaStoreClient.getProxy(conf, true) - which calls HiveMetaStoreClient(HiveConf, Boolean) which exists in - (at least) 2.1.0.2.6.5.0-292 and later but not in 2.1.0.2.6.0.3-8 (the HDP 2.6 release) - i.e. 
RetryingMetaStoreClient.getProxy(conf, true) is broken in 2.6.0*/ - IMetaStoreClient client = RetryingMetaStoreClient.getProxy(conf, - new Class[]{HiveConf.class, HiveMetaHookLoader.class, Boolean.class}, - new Object[]{conf, getHookLoader(), Boolean.TRUE}, null, HiveMetaStoreClient.class.getName()); - if (hiveConf != null) { - SessionState ss = SessionState.start(conf); - ss.applyAuthorizationPolicy(); - } - return client; - } catch (MetaException | HiveException e) { - throw new RuntimeException("Error connecting to Hive Metastore URI: " - + conf.getVar(HiveConf.ConfVars.METASTOREURIS) + ". " + e.getMessage(), e); - } - } - - /* - * todo: change script comments to a preamble instead of a footer - */ - private void prepareAcidUpgradeInternal() - throws HiveException, TException, IOException { - if (!isAcidEnabled(conf)) { - LOG.info("acid is off, there can't be any acid tables - nothing to compact"); - return; - } - IMetaStoreClient hms = metaStoreClient.get(); - LOG.debug("Looking for databases"); - String exceptionMsg = null; - List databases; - CompactTablesState compactTablesState; - try { - databases = hms.getDatabases(runOptions.getDbRegex()); //TException - LOG.debug("Found " + databases.size() + " databases to process"); - - ForkJoinPool processTablePool = new ForkJoinPool( - runOptions.getTablePoolSize(), - new NamedForkJoinWorkerThreadFactory("Table-"), - getUncaughtExceptionHandler(), - false - ); - compactTablesState = databases.stream() - .map(dbName -> processDatabase(dbName, processTablePool, runOptions)) - .reduce(CompactTablesState::merge) - .orElse(CompactTablesState.empty()); - - } catch (Exception e) { - if (isAccessControlException(e)) { - exceptionMsg = "Unable to get databases. Pre-upgrade tool requires read-access " + - "to databases and tables to determine if a table has to be compacted. 
" + - "Set " + HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_AUTH_READS.varname + " config to " + - "false to allow read-access to databases and tables and retry the pre-upgrade tool again.."; - } - throw new HiveException(exceptionMsg, e); - } - - makeCompactionScript(compactTablesState, runOptions.getOutputDir()); - - if(runOptions.isExecute()) { - while(compactTablesState.getMetaInfo().getCompactionIds().size() > 0) { - LOG.debug("Will wait for " + compactTablesState.getMetaInfo().getCompactionIds().size() + - " compactions to complete"); - ShowCompactResponse resp = hms.showCompactions(); - for(ShowCompactResponseElement e : resp.getCompacts()) { - final String state = e.getState(); - boolean removed; - switch (state) { - case TxnStore.CLEANING_RESPONSE: - case TxnStore.SUCCEEDED_RESPONSE: - removed = compactTablesState.getMetaInfo().getCompactionIds().remove(e.getId()); - if(removed) { - LOG.debug("Required compaction succeeded: " + e.toString()); - } - break; - case TxnStore.ATTEMPTED_RESPONSE: - case TxnStore.FAILED_RESPONSE: - removed = compactTablesState.getMetaInfo().getCompactionIds().remove(e.getId()); - if(removed) { - LOG.warn("Required compaction failed: " + e.toString()); - } - break; - case TxnStore.INITIATED_RESPONSE: - //may flood the log - //LOG.debug("Still waiting on: " + e.toString()); - break; - case TxnStore.WORKING_RESPONSE: - LOG.debug("Still working on: " + e.toString()); - break; - default://shouldn't be any others - LOG.error("Unexpected state for : " + e.toString()); - } - } - if(compactTablesState.getMetaInfo().getCompactionIds().size() > 0) { - try { - if (callback != null) { - callback.onWaitForCompaction(); - } - Thread.sleep(pollIntervalMs); - } catch (InterruptedException ex) { - //this only responds to ^C - } - } - } - } - } - - private Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() { - return (t, e) -> LOG.error(String.format("Thread %s exited with error", t.getName()), e); - } - - private CompactTablesState processDatabase( - String dbName, ForkJoinPool threadPool, RunOptions runOptions) { - try { - IMetaStoreClient hms = metaStoreClient.get(); - - List tables; - if (runOptions.getTableType() == null) { - tables = hms.getTables(dbName, runOptions.getTableRegex()); - LOG.debug("found {} tables in {}", tables.size(), dbName); - } else { - tables = hms.getTables(dbName, runOptions.getTableRegex(), runOptions.getTableType()); - LOG.debug("found {} {} in {}", tables.size(), runOptions.getTableType().name(), dbName); - } - - return threadPool.submit( - () -> tables.parallelStream() - .map(table -> processTable(dbName, table, runOptions)) - .reduce(CompactTablesState::merge)).get() - .orElse(CompactTablesState.empty()); - } catch (Exception e) { - if (isAccessControlException(e)) { - // we may not have access to read all tables from this db - throw new RuntimeException("Unable to access " + dbName + ". Pre-upgrade tool requires read-access " + - "to databases and tables to determine if a table has to be compacted. 
" + - "Set " + HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_AUTH_READS.varname + " config to " + - "false to allow read-access to databases and tables and retry the pre-upgrade tool again..", e); - } - throw new RuntimeException(e); - } - } - - private CompactTablesState processTable( - String dbName, String tableName, RunOptions runOptions) { - try { - IMetaStoreClient hms = metaStoreClient.get(); - final CompactionMetaInfo compactionMetaInfo = new CompactionMetaInfo(); - - Table t = hms.getTable(dbName, tableName); - LOG.debug("processing table " + Warehouse.getQualifiedName(t)); - List compactionCommands = - getCompactionCommands(t, conf, hms, compactionMetaInfo, runOptions.isExecute(), txns.get()); - return CompactTablesState.compactions(compactionCommands, compactionMetaInfo); - /*todo: handle renaming files somewhere*/ - } catch (Exception e) { - if (isAccessControlException(e)) { - // this could be external table with 0 permission for hive user - throw new RuntimeException( - "Unable to access " + dbName + "." + tableName + ". Pre-upgrade tool requires read-access " + - "to databases and tables to determine if a table has to be compacted. " + - "Set " + HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_AUTH_READS.varname + " config to " + - "false to allow read-access to databases and tables and retry the pre-upgrade tool again..", e); - } - throw new RuntimeException(e); - } - } - - private boolean isAccessControlException(final Exception e) { - // hadoop security AccessControlException - if ((e instanceof MetaException && e.getCause() instanceof AccessControlException) || - ExceptionUtils.getRootCause(e) instanceof AccessControlException) { - return true; - } - - // java security AccessControlException - if ((e instanceof MetaException && e.getCause() instanceof java.security.AccessControlException) || - ExceptionUtils.getRootCause(e) instanceof java.security.AccessControlException) { - return true; - } - - // metastore in some cases sets the AccessControlException as message instead of wrapping the exception - return e instanceof MetaException - && e.getMessage().startsWith("java.security.AccessControlException: Permission denied"); - } - - /** - * Generates a set compaction commands to run on pre Hive 3 cluster. - */ - private static void makeCompactionScript(CompactTablesState result, String scriptLocation) throws IOException { - if (result.getCompactionCommands().isEmpty()) { - LOG.info("No compaction is necessary"); - return; - } - String fileName = "compacts_" + System.currentTimeMillis() + ".sql"; - LOG.debug("Writing compaction commands to " + fileName); - try(PrintWriter pw = createScript( - result.getCompactionCommands(), fileName, scriptLocation)) { - //add post script - pw.println("-- Generated total of " + result.getCompactionCommands().size() + " compaction commands"); - if(result.getMetaInfo().getNumberOfBytes() < Math.pow(2, 20)) { - //to see it working in UTs - pw.println("-- The total volume of data to be compacted is " + - String.format("%.6fMB", result.getMetaInfo().getNumberOfBytes()/Math.pow(2, 20))); - } else { - pw.println("-- The total volume of data to be compacted is " + - String.format("%.3fGB", result.getMetaInfo().getNumberOfBytes()/Math.pow(2, 30))); - } - pw.println(); - //todo: should be at the top of the file... 
- pw.println( - "-- Please note that compaction may be a heavyweight and time consuming process.\n" + - "-- Submitting all of these commands will enqueue them to a scheduling queue from\n" + - "-- which they will be picked up by compactor Workers. The max number of\n" + - "-- concurrent Workers is controlled by hive.compactor.worker.threads configured\n" + - "-- for the standalone metastore process. Compaction itself is a Map-Reduce job\n" + - "-- which is submitted to the YARN queue identified by hive.compactor.job.queue\n" + - "-- property if defined or 'default' if not defined. It's advisable to set the\n" + - "-- capacity of this queue appropriately"); - } - } - - private static PrintWriter createScript(List commands, String fileName, - String scriptLocation) throws IOException { - FileWriter fw = new FileWriter(scriptLocation + "/" + fileName); - PrintWriter pw = new PrintWriter(fw); - for(String cmd : commands) { - pw.println(cmd + ";"); - } - return pw; - } - /** - * @return any compaction commands to run for {@code Table t} - */ - private static List getCompactionCommands(Table t, HiveConf conf, - IMetaStoreClient hms, CompactionMetaInfo compactionMetaInfo, boolean execute, - ValidTxnList txns) throws IOException, TException, HiveException { - if(!isFullAcidTable(t)) { - return Collections.emptyList(); - } - if(t.getPartitionKeysSize() <= 0) { - //not partitioned - if(!needsCompaction(new Path(t.getSd().getLocation()), conf, compactionMetaInfo, txns)) { - return Collections.emptyList(); - } - - List cmds = new ArrayList<>(); - cmds.add(getCompactionCommand(t, null)); - if(execute) { - scheduleCompaction(t, null, hms, compactionMetaInfo); - } - return cmds; - } - List partNames = hms.listPartitionNames(t.getDbName(), t.getTableName(), (short)-1); - int batchSize = PARTITION_BATCH_SIZE; - int numWholeBatches = partNames.size()/batchSize; - List compactionCommands = new ArrayList<>(); - for(int i = 0; i < numWholeBatches; i++) { - List partitionList = hms.getPartitionsByNames(t.getDbName(), t.getTableName(), - partNames.subList(i * batchSize, (i + 1) * batchSize)); - getCompactionCommands(t, partitionList, hms, execute, compactionCommands, - compactionMetaInfo, conf, txns); - } - if(numWholeBatches * batchSize < partNames.size()) { - //last partial batch - List partitionList = hms.getPartitionsByNames(t.getDbName(), t.getTableName(), - partNames.subList(numWholeBatches * batchSize, partNames.size())); - getCompactionCommands(t, partitionList, hms, execute, compactionCommands, - compactionMetaInfo, conf, txns); - } - return compactionCommands; - } - private static void getCompactionCommands(Table t, List partitionList, IMetaStoreClient hms, - boolean execute, List compactionCommands, CompactionMetaInfo compactionMetaInfo, - HiveConf conf, ValidTxnList txns) - throws IOException, TException, HiveException { - for (Partition p : partitionList) { - if (needsCompaction(new Path(p.getSd().getLocation()), conf, compactionMetaInfo, txns)) { - compactionCommands.add(getCompactionCommand(t, p)); - if (execute) { - scheduleCompaction(t, p, hms, compactionMetaInfo); - } - } - } - } - private static void scheduleCompaction(Table t, Partition p, IMetaStoreClient db, - CompactionMetaInfo compactionMetaInfo) throws HiveException, MetaException { - String partName = p == null ? 
null : - Warehouse.makePartName(t.getPartitionKeys(), p.getValues()); - try { - CompactionResponse resp = - //this gives an easy way to get at compaction ID so we can only wait for those this - //utility started - db.compact2(t.getDbName(), t.getTableName(), partName, CompactionType.MAJOR, null); - if (!resp.isAccepted()) { - LOG.info(Warehouse.getQualifiedName(t) + (p == null ? "" : "/" + partName) + - " is already being compacted with id=" + resp.getId()); - } else { - LOG.info("Scheduled compaction for " + Warehouse.getQualifiedName(t) + - (p == null ? "" : "/" + partName) + " with id=" + resp.getId()); - } - compactionMetaInfo.addCompactionId(resp.getId()); - } catch (TException e) { - throw new HiveException(e); - } - } - - /** - * - * @param location - path to a partition (or table if not partitioned) dir - */ - private static boolean needsCompaction(Path location, HiveConf conf, - CompactionMetaInfo compactionMetaInfo, ValidTxnList txns) throws IOException { - FileSystem fs = location.getFileSystem(conf); - FileStatus[] deltas = fs.listStatus(location, new PathFilter() { - @Override - public boolean accept(Path path) { - //checking for delete_delta is only so that this functionality can be exercised by code 3.0 - //which cannot produce any deltas with mix of update/insert events - return path.getName().startsWith("delta_") || path.getName().startsWith("delete_delta_"); - } - }); - if(deltas == null || deltas.length == 0) { - //base_n cannot contain update/delete. Original files are all 'insert' and we need to compact - //only if there are update/delete events. - return false; - } - /*getAcidState() is smart not to return any deltas in current if there is a base that covers - * them, i.e. if they were compacted but not yet cleaned. This means re-checking if - * compaction is needed should cheap(er)*/ - AcidUtils.Directory dir = AcidUtils.getAcidState(location, conf, txns); - deltaLoop: for(AcidUtils.ParsedDelta delta : dir.getCurrentDirectories()) { - FileStatus[] buckets = fs.listStatus(delta.getPath(), new PathFilter() { - @Override - public boolean accept(Path path) { - //since this is inside a delta dir created by Hive 2.x or earlier it can only contain - //bucket_x or bucket_x__flush_length - return path.getName().startsWith("bucket_"); - } - }); - for(FileStatus bucket : buckets) { - if(bucket.getPath().getName().endsWith("_flush_length")) { - //streaming ingest dir - cannot have update/delete events - continue deltaLoop; - } - if(needsCompaction(bucket, fs)) { - //found delete events - this 'location' needs compacting - compactionMetaInfo.addBytes(getDataSize(location, conf)); - - //if there are un-compacted original files, they will be included in compaction, so - //count at the size for 'cost' estimation later - for(HadoopShims.HdfsFileStatusWithId origFile : dir.getOriginalFiles()) { - FileStatus fileStatus = origFile.getFileStatus(); - if(fileStatus != null) { - compactionMetaInfo.addBytes(fileStatus.getLen()); - } - } - return true; - } - } - } - return false; - } - - /** - * @param location - path to a partition (or table if not partitioned) dir - */ - private static long getDataSize(Path location, HiveConf conf) throws IOException { - FileSystem fs = location.getFileSystem(conf); - ContentSummary cs = fs.getContentSummary(location); - return cs.getLength(); - } - - - private static final Charset UTF_8 = StandardCharsets.UTF_8; - private static final ThreadLocal UTF8_DECODER = - ThreadLocal.withInitial(UTF_8::newDecoder); - private static final String ACID_STATS = 
"hive.acid.stats"; - - private static boolean needsCompaction(FileStatus bucket, FileSystem fs) throws IOException { - //create reader, look at footer - //no need to check side file since it can only be in a streaming ingest delta - Reader orcReader = OrcFile.createReader(bucket.getPath(), OrcFile.readerOptions(fs.getConf()).filesystem(fs)); - if (orcReader.hasMetadataValue(ACID_STATS)) { - try { - ByteBuffer val = orcReader.getMetadataValue(ACID_STATS).duplicate(); - String acidStats = UTF8_DECODER.get().decode(val).toString(); - String[] parts = acidStats.split(","); - long updates = Long.parseLong(parts[1]); - long deletes = Long.parseLong(parts[2]); - return deletes > 0 || updates > 0; - } catch (CharacterCodingException e) { - throw new IllegalArgumentException("Bad string encoding for " + ACID_STATS, e); - } - } else { - throw new IllegalStateException("AcidStats missing in " + bucket.getPath()); - } - } - - private static String getCompactionCommand(Table t, Partition p) { - StringBuilder sb = new StringBuilder("ALTER TABLE ").append(Warehouse.getQualifiedName(t)); - if(t.getPartitionKeysSize() > 0) { - assert p != null : "must supply partition for partitioned table " + - Warehouse.getQualifiedName(t); - sb.append(" PARTITION("); - for (int i = 0; i < t.getPartitionKeysSize(); i++) { - sb.append(t.getPartitionKeys().get(i).getName()).append('=').append( - genPartValueString(t.getPartitionKeys().get(i).getType(), p.getValues().get(i))). - append(","); - } - //replace trailing ',' - sb.setCharAt(sb.length() - 1, ')'); - } - return sb.append(" COMPACT 'major'").toString(); - } - - /** - * This is copy-pasted from {@link org.apache.hadoop.hive.ql.parse.ColumnStatsSemanticAnalyzer}, - * which can't be refactored since this is linked against Hive 2.x . 
- */ - private static String genPartValueString(String partColType, String partVal) { - String returnVal; - if (partColType.equals(serdeConstants.STRING_TYPE_NAME) || - partColType.contains(serdeConstants.VARCHAR_TYPE_NAME) || - partColType.contains(serdeConstants.CHAR_TYPE_NAME)) { - returnVal = "'" + escapeSQLString(partVal) + "'"; - } else if (partColType.equals(serdeConstants.TINYINT_TYPE_NAME)) { - returnVal = partVal + "Y"; - } else if (partColType.equals(serdeConstants.SMALLINT_TYPE_NAME)) { - returnVal = partVal + "S"; - } else if (partColType.equals(serdeConstants.INT_TYPE_NAME)) { - returnVal = partVal; - } else if (partColType.equals(serdeConstants.BIGINT_TYPE_NAME)) { - returnVal = partVal + "L"; - } else if (partColType.contains(serdeConstants.DECIMAL_TYPE_NAME)) { - returnVal = partVal + "BD"; - } else if (partColType.equals(serdeConstants.DATE_TYPE_NAME) || - partColType.equals(serdeConstants.TIMESTAMP_TYPE_NAME)) { - returnVal = partColType + " '" + escapeSQLString(partVal) + "'"; - } else { - //for other usually not used types, just quote the value - returnVal = "'" + escapeSQLString(partVal) + "'"; - } - - return returnVal; - } - private static boolean isFullAcidTable(Table t) { - if (t.getParametersSize() <= 0) { - //cannot be acid - return false; - } - String transacationalValue = t.getParameters() - .get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL); - if ("true".equalsIgnoreCase(transacationalValue)) { - System.out.println("Found Acid table: " + Warehouse.getQualifiedName(t)); - return true; - } - return false; - } - private static boolean isAcidEnabled(HiveConf hiveConf) { - String txnMgr = hiveConf.getVar(HiveConf.ConfVars.HIVE_TXN_MANAGER); - boolean concurrency = hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY); - String dbTxnMgr = "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"; - return txnMgr.equals(dbTxnMgr) && concurrency; - } - - @Override - public void close() { - metaStoreClient.close(); - } - - @VisibleForTesting - abstract static class Callback { - /** - * This is a hack enable Unit testing. Derby can't handle multiple concurrent threads but - * somehow Compactor needs to run to test "execute" mode. This callback can be used - * to run Worker. For TESTING ONLY. - */ - void onWaitForCompaction() throws MetaException {} - } - @VisibleForTesting - static Callback callback; - @VisibleForTesting - static int pollIntervalMs = 1000*30; - /** - * can set it from tests to test when config needs something other than default values. - */ - @VisibleForTesting - static HiveConf hiveConf = null; -} diff --git a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/RunOptions.java b/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/RunOptions.java deleted file mode 100644 index 534b971a7719..000000000000 --- a/upgrade-acid/pre-upgrade/src/main/java/org/apache/hadoop/hive/upgrade/acid/RunOptions.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.upgrade.acid;
-
-import org.apache.commons.cli.CommandLine;
-import org.apache.hadoop.hive.metastore.TableType;
-
-/**
- * This class's instance holds the option values were passed by the user via the command line.
- */
-public class RunOptions {
-
-  public static RunOptions fromCommandLine(CommandLine commandLine) {
-    String tableTypeText = commandLine.getOptionValue("tableType");
-
-    int defaultPoolSize = Runtime.getRuntime().availableProcessors();
-    if (defaultPoolSize < 1)
-      defaultPoolSize = 1;
-
-    int tablePoolSize = getIntOptionValue(commandLine, "tablePoolSize", defaultPoolSize);
-    if (tablePoolSize < 1)
-      throw new IllegalArgumentException("Please specify a positive integer option value for tablePoolSize");
-
-    return new RunOptions(
-      commandLine.getOptionValue("location", "."),
-      commandLine.hasOption("execute"),
-      commandLine.getOptionValue("dbRegex", ".*"),
-      commandLine.getOptionValue("tableRegex", ".*"),
-      tableTypeText == null ? null : TableType.valueOf(tableTypeText),
-      tablePoolSize);
-  }
-
-  private static int getIntOptionValue(CommandLine commandLine, String optionName, int defaultValue) {
-    if (commandLine.hasOption(optionName)) {
-      try {
-        return Integer.parseInt(commandLine.getOptionValue(optionName));
-      } catch (NumberFormatException e) {
-        throw new IllegalArgumentException("Please specify a positive integer option value for " + optionName, e);
-      }
-    }
-    return defaultValue;
-  }
-
-  private final String outputDir;
-  private final boolean execute;
-  private final String dbRegex;
-  private final String tableRegex;
-  private final TableType tableType;
-  private final int tablePoolSize;
-
-  private RunOptions(String outputDir, boolean execute, String dbRegex, String tableRegex, TableType tableType, int tablePoolSize) {
-    this.outputDir = outputDir;
-    this.execute = execute;
-    this.dbRegex = dbRegex;
-    this.tableRegex = tableRegex;
-    this.tableType = tableType;
-    this.tablePoolSize = tablePoolSize;
-  }
-
-  public String getOutputDir() {
-    return outputDir;
-  }
-
-  public boolean isExecute() {
-    return execute;
-  }
-
-  public String getDbRegex() {
-    return dbRegex;
-  }
-
-  public String getTableRegex() {
-    return tableRegex;
-  }
-
-  public TableType getTableType() {
-    return tableType;
-  }
-
-  public int getTablePoolSize() {
-    return tablePoolSize;
-  }
-
-  @Override
-  public String toString() {
-    return "RunOptions{" +
-      "outputDir='" + outputDir + '\'' +
-      ", execute=" + execute +
-      ", dbRegex='" + dbRegex + '\'' +
-      ", tableRegex='" + tableRegex + '\'' +
-      ", tableType=" + tableType +
-      ", tablePoolSize=" + tablePoolSize +
-      '}';
-  }
-}
diff --git a/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestCloseableThreadLocal.java b/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestCloseableThreadLocal.java
deleted file mode 100644
index 2584a3be5285..000000000000
--- a/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestCloseableThreadLocal.java
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.upgrade.acid;
-
-import static org.hamcrest.CoreMatchers.not;
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ExecutionException;
-
-import org.junit.Test;
-
-public class TestCloseableThreadLocal {
-
-  private static class AutoCloseableStub implements AutoCloseable {
-
-    private boolean closed = false;
-
-    public boolean isClosed() {
-      return closed;
-    }
-
-    @Override
-    public void close() {
-      closed = true;
-    }
-  }
-
-  @Test
-  public void testResourcesAreInitiallyNotClosed() {
-    CloseableThreadLocal closeableThreadLocal =
-      new CloseableThreadLocal<>(AutoCloseableStub::new, AutoCloseableStub::close, 1);
-
-    assertThat(closeableThreadLocal.get().isClosed(), is(false));
-  }
-
-  @Test
-  public void testAfterCallingCloseAllInstancesAreClosed() throws ExecutionException, InterruptedException {
-    CloseableThreadLocal closeableThreadLocal =
-      new CloseableThreadLocal<>(AutoCloseableStub::new, AutoCloseableStub::close, 2);
-
-    AutoCloseableStub asyncInstance = CompletableFuture.supplyAsync(closeableThreadLocal::get).get();
-    AutoCloseableStub syncInstance = closeableThreadLocal.get();
-
-    closeableThreadLocal.close();
-
-    assertThat(asyncInstance.isClosed(), is(true));
-    assertThat(syncInstance.isClosed(), is(true));
-  }
-
-  @Test
-  public void testSubsequentGetsInTheSameThreadGivesBackTheSameObject() {
-    CloseableThreadLocal closeableThreadLocal =
-      new CloseableThreadLocal<>(AutoCloseableStub::new, AutoCloseableStub::close, 2);
-
-    AutoCloseableStub ref1 = closeableThreadLocal.get();
-    AutoCloseableStub ref2 = closeableThreadLocal.get();
-    assertThat(ref1, is(ref2));
-  }
-
-  @Test
-  public void testDifferentThreadsHasDifferentInstancesOfTheResource() throws ExecutionException, InterruptedException {
-    CloseableThreadLocal closeableThreadLocal =
-      new CloseableThreadLocal<>(AutoCloseableStub::new, AutoCloseableStub::close, 2);
-
-    AutoCloseableStub asyncInstance = CompletableFuture.supplyAsync(closeableThreadLocal::get).get();
-    AutoCloseableStub syncInstance = closeableThreadLocal.get();
-    assertThat(asyncInstance, is(not(syncInstance)));
-  }
-}
\ No newline at end of file
diff --git a/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestPreUpgradeTool.java b/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestPreUpgradeTool.java
deleted file mode 100644
index 2064baa544c0..000000000000
--- a/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestPreUpgradeTool.java
+++ /dev/null
@@ -1,495 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.upgrade.acid;
-
-import static org.hamcrest.CoreMatchers.allOf;
-import static org.hamcrest.CoreMatchers.hasItem;
-import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.not;
-import static org.hamcrest.CoreMatchers.nullValue;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.core.StringContains.containsString;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.attribute.PosixFilePermission;
-import java.nio.file.attribute.PosixFilePermissions;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.common.FileUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.ShowCompactRequest;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
-import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement;
-import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
-import org.apache.hadoop.hive.metastore.txn.TxnStore;
-import org.apache.hadoop.hive.metastore.txn.TxnUtils;
-import org.apache.hadoop.hive.ql.Driver;
-import org.apache.hadoop.hive.ql.QueryState;
-import org.apache.hadoop.hive.ql.io.HiveInputFormat;
-import org.apache.hadoop.hive.ql.metadata.HiveException;
-import org.apache.hadoop.hive.ql.processors.CommandProcessorResponse;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.apache.hadoop.hive.ql.txn.compactor.Worker;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TestName;
-
-public class TestPreUpgradeTool {
-  private static final String TEST_DATA_DIR = new File(System.getProperty("java.io.tmpdir") +
-    File.separator + TestPreUpgradeTool.class.getCanonicalName() + "-" + System.currentTimeMillis()
-  ).getPath().replaceAll("\\\\", "/");
-
-  private String getTestDataDir() {
-    return TEST_DATA_DIR;
-  }
-
-  /**
-   * preUpgrade: test tables that need to be compacted, waits for compaction
-   * postUpgrade: generates scripts w/o asserts
-   */
-  @Test
-  public void testUpgrade() throws Exception {
-    int[][] data = {{1, 2}, {3, 4}, {5, 6}};
-    int[][] dataPart = {{1, 2, 10}, {3, 4, 11}, {5, 6, 12}};
-    runStatementOnDriver("drop table if exists TAcid");
-    runStatementOnDriver("drop table if exists TAcidPart");
-    runStatementOnDriver("drop table if exists TFlat");
-    runStatementOnDriver("drop table if exists TFlatText");
-
-    try {
-      runStatementOnDriver(
-        "create table TAcid (a int, b int) clustered by (b) into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
-      runStatementOnDriver(
-        "create table TAcidPart (a int, b int) partitioned by (p tinyint) clustered by (b) into 2 buckets stored" +
-          " as orc TBLPROPERTIES ('transactional'='true')");
-      //on 2.x these are guaranteed to not be acid
-      runStatementOnDriver("create table TFlat (a int, b int) stored as orc tblproperties('transactional'='false')");
-      runStatementOnDriver(
-        "create table TFlatText (a int, b int) stored as textfile tblproperties('transactional'='false')");
-
-
-      //this needs major compaction
-      runStatementOnDriver("insert into TAcid" + makeValuesClause(data));
-      runStatementOnDriver("update TAcid set a = 1 where b = 2");
-
-      //this table needs to be converted to CRUD Acid
-      runStatementOnDriver("insert into TFlat" + makeValuesClause(data));
-
-      //this table needs to be converted to MM
-      runStatementOnDriver("insert into TFlatText" + makeValuesClause(data));
-
-      //p=10 needs major compaction
-      runStatementOnDriver("insert into TAcidPart partition(p)" + makeValuesClause(dataPart));
-      runStatementOnDriver("update TAcidPart set a = 1 where b = 2 and p = 10");
-
-      //todo: add partitioned table that needs conversion to MM/Acid
-
-      //todo: rename files case
-      String[] args = {"-location", getTestDataDir(), "-execute"};
-      PreUpgradeTool.callback = new PreUpgradeTool.Callback() {
-        @Override
-        void onWaitForCompaction() throws MetaException {
-          runWorker(hiveConf);
-        }
-      };
-      PreUpgradeTool.pollIntervalMs = 1;
-      PreUpgradeTool.hiveConf = hiveConf;
-      PreUpgradeTool.main(args);
-
-      String[] scriptFiles = getScriptFiles();
-      assertThat(scriptFiles.length, is(1));
-
-      List scriptContent = loadScriptContent(new File(getTestDataDir(), scriptFiles[0]));
-      assertThat(scriptContent.size(), is(2));
-      assertThat(scriptContent, hasItem(is("ALTER TABLE default.tacid COMPACT 'major';")));
-      assertThat(scriptContent, hasItem(is("ALTER TABLE default.tacidpart PARTITION(p=10Y) COMPACT 'major';")));
-
-      TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf);
-
-      ShowCompactResponse resp = txnHandler.showCompact(new ShowCompactRequest());
-      Assert.assertEquals(2, resp.getCompactsSize());
-      for (ShowCompactResponseElement e : resp.getCompacts()) {
-        Assert.assertEquals(e.toString(), TxnStore.CLEANING_RESPONSE, e.getState());
-      }
-
-      // Check whether compaction was successful in the first run
-      File secondRunDataDir = new File(getTestDataDir(), "secondRun");
-      if (!secondRunDataDir.exists()) {
-        if (!secondRunDataDir.mkdir()) {
-          throw new IOException("Unable to create directory" + secondRunDataDir.getAbsolutePath());
-        }
-      }
-      String[] args2 = {"-location", secondRunDataDir.getAbsolutePath()};
-      PreUpgradeTool.main(args2);
-
-      scriptFiles = secondRunDataDir.list();
-      assertThat(scriptFiles, is(not(nullValue())));
-      assertThat(scriptFiles.length, is(0));
-
-    } finally {
-      runStatementOnDriver("drop table if exists TAcid");
-      runStatementOnDriver("drop table if exists TAcidPart");
-      runStatementOnDriver("drop table if exists TFlat");
-      runStatementOnDriver("drop table if exists TFlatText");
-    }
-  }
-
-  private static final String INCLUDE_DATABASE_NAME ="DInclude";
-  private static final String EXCLUDE_DATABASE_NAME ="DExclude";
-
-  @Test
-  public void testOnlyFilteredDatabasesAreUpgradedWhenRegexIsGiven() throws Exception {
-    int[][] data = {{1, 2}, {3, 4}, {5, 6}};
-    runStatementOnDriver("drop database if exists " + INCLUDE_DATABASE_NAME + " cascade");
-    runStatementOnDriver("drop database if exists " + EXCLUDE_DATABASE_NAME + " cascade");
-
-    try {
-      runStatementOnDriver("create database " + INCLUDE_DATABASE_NAME);
-      runStatementOnDriver("use " + INCLUDE_DATABASE_NAME);
-      runStatementOnDriver("create table " + INCLUDE_TABLE_NAME + " (a int, b int) clustered by (b) " +
-        "into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
-      runStatementOnDriver("insert into " + INCLUDE_TABLE_NAME + makeValuesClause(data));
-      runStatementOnDriver("update " + INCLUDE_TABLE_NAME + " set a = 1 where b = 2");
-
-      runStatementOnDriver("create database " + EXCLUDE_DATABASE_NAME);
-      runStatementOnDriver("use " + EXCLUDE_DATABASE_NAME);
-      runStatementOnDriver("create table " + EXCLUDE_DATABASE_NAME + " (a int, b int) clustered by (b) " +
-        "into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
-      runStatementOnDriver("insert into " + EXCLUDE_DATABASE_NAME + makeValuesClause(data));
-      runStatementOnDriver("update " + EXCLUDE_DATABASE_NAME + " set a = 1 where b = 2");
-
-      String[] args = {"-location", getTestDataDir(), "-dbRegex", "*include*"};
-      PreUpgradeTool.callback = new PreUpgradeTool.Callback() {
-        @Override
-        void onWaitForCompaction() throws MetaException {
-          runWorker(hiveConf);
-        }
-      };
-      PreUpgradeTool.pollIntervalMs = 1;
-      PreUpgradeTool.hiveConf = hiveConf;
-      PreUpgradeTool.main(args);
-
-      String[] scriptFiles = getScriptFiles();
-      assertThat(scriptFiles.length, is(1));
-
-      List scriptContent = loadScriptContent(new File(getTestDataDir(), scriptFiles[0]));
-      assertThat(scriptContent.size(), is(1));
-      assertThat(scriptContent.get(0), is("ALTER TABLE dinclude.tinclude COMPACT 'major';"));
-
-    } finally {
-      runStatementOnDriver("drop database if exists " + INCLUDE_DATABASE_NAME + " cascade");
-      runStatementOnDriver("drop database if exists " + EXCLUDE_DATABASE_NAME + " cascade");
-    }
-  }
-
-  private static final String INCLUDE_TABLE_NAME ="TInclude";
-  private static final String EXCLUDE_TABLE_NAME ="TExclude";
-
-  @Test
-  public void testOnlyFilteredTablesAreUpgradedWhenRegexIsGiven() throws Exception {
-    int[][] data = {{1, 2}, {3, 4}, {5, 6}};
-    runStatementOnDriver("drop table if exists " + INCLUDE_TABLE_NAME);
-    runStatementOnDriver("drop table if exists " + EXCLUDE_TABLE_NAME);
-
-    try {
-      runStatementOnDriver("create table " + INCLUDE_TABLE_NAME + " (a int, b int) clustered by (b) " +
-        "into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
-      runStatementOnDriver("create table " + EXCLUDE_TABLE_NAME + " (a int, b int) clustered by (b) " +
-        "into 2 buckets stored as orc TBLPROPERTIES ('transactional'='true')");
-
-      runStatementOnDriver("insert into " + INCLUDE_TABLE_NAME + makeValuesClause(data));
-      runStatementOnDriver("update " + INCLUDE_TABLE_NAME + " set a = 1 where b = 2");
-
-      runStatementOnDriver("insert into " + EXCLUDE_TABLE_NAME + makeValuesClause(data));
-      runStatementOnDriver("update " + EXCLUDE_TABLE_NAME + " set a = 1 where b = 2");
-
-      String[] args = {"-location", getTestDataDir(), "-tableRegex", "*include*"};
-      PreUpgradeTool.callback = new PreUpgradeTool.Callback() {
-        @Override
-        void onWaitForCompaction() throws MetaException {
-          runWorker(hiveConf);
-        }
-      };
-      PreUpgradeTool.pollIntervalMs = 1;
-      PreUpgradeTool.hiveConf = hiveConf;
-      PreUpgradeTool.main(args);
-
-      String[] scriptFiles = getScriptFiles();
-      assertThat(scriptFiles.length, is(1));
-
-      List scriptContent = loadScriptContent(new File(getTestDataDir(), scriptFiles[0]));
-      assertThat(scriptContent.size(), is(1));
-      assertThat(scriptContent.get(0), allOf(
-        containsString("ALTER TABLE"),
-        containsString(INCLUDE_TABLE_NAME.toLowerCase()),
-        containsString("COMPACT")));
-
-    } finally {
-      runStatementOnDriver("drop table if exists " + INCLUDE_TABLE_NAME);
-      runStatementOnDriver("drop table if exists " + EXCLUDE_TABLE_NAME);
-    }
-  }
-
-  private String[] getScriptFiles() {
-    File testDataDir = new File(getTestDataDir());
-    String[] scriptFiles = testDataDir.list((dir, name) -> name.startsWith("compacts_") && name.endsWith(".sql"));
-    assertThat(scriptFiles, is(not(nullValue())));
-    return scriptFiles;
-  }
-
-  private List loadScriptContent(File file) throws IOException {
-    List content = org.apache.commons.io.FileUtils.readLines(file);
-    content.removeIf(line -> line.startsWith("--"));
-    content.removeIf(StringUtils::isBlank);
-    return content;
-  }
-
-  @Test
-  public void testUpgradeExternalTableNoReadPermissionForDatabase() throws Exception {
-    int[][] data = {{1, 2}, {3, 4}, {5, 6}};
-
-    runStatementOnDriver("drop database if exists test cascade");
-    runStatementOnDriver("drop table if exists TExternal");
-
-    runStatementOnDriver("create database test");
-    runStatementOnDriver("create table test.TExternal (a int, b int) stored as orc tblproperties" +
-      "('transactional'='false')");
-
-    //this needs major compaction
-    runStatementOnDriver("insert into test.TExternal" + makeValuesClause(data));
-
-    String dbDir = getWarehouseDir() + "/test.db";
-    File dbPath = new File(dbDir);
-    try {
-      Set perms = PosixFilePermissions.fromString("-w-------");
-      Files.setPosixFilePermissions(dbPath.toPath(), perms);
-      String[] args = {"-location", getTestDataDir(), "-execute"};
-      PreUpgradeTool.pollIntervalMs = 1;
-      PreUpgradeTool.hiveConf = hiveConf;
-      Exception expected = null;
-      try {
-        PreUpgradeTool.main(args);
-      } catch (Exception e) {
-        expected = e;
-      }
-
-      Assert.assertNotNull(expected);
-      Assert.assertTrue(expected instanceof HiveException);
-      Assert.assertTrue(expected.getMessage().contains("Pre-upgrade tool requires " +
-        "read-access to databases and tables to determine if a table has to be compacted."));
-    } finally {
-      Set perms = PosixFilePermissions.fromString("rwxrw----");
-      Files.setPosixFilePermissions(dbPath.toPath(), perms);
-    }
-  }
-
-  @Test
-  public void testUpgradeExternalTableNoReadPermissionForTable() throws Exception {
-    int[][] data = {{1, 2}, {3, 4}, {5, 6}};
-    runStatementOnDriver("drop table if exists TExternal");
-
-    runStatementOnDriver("create table TExternal (a int, b int) stored as orc tblproperties('transactional'='false')");
-
-    //this needs major compaction
-    runStatementOnDriver("insert into TExternal" + makeValuesClause(data));
-
-    String tableDir = getWarehouseDir() + "/texternal";
-    File tablePath = new File(tableDir);
-    try {
-      Set perms = PosixFilePermissions.fromString("-w-------");
-      Files.setPosixFilePermissions(tablePath.toPath(), perms);
-      String[] args = {"-location", getTestDataDir(), "-execute"};
-      PreUpgradeTool.pollIntervalMs = 1;
-      PreUpgradeTool.hiveConf = hiveConf;
-      Exception expected = null;
-      try {
-        PreUpgradeTool.main(args);
-      } catch (Exception e) {
-        expected = e;
-      }
-
-      Assert.assertNotNull(expected);
-      Assert.assertTrue(expected instanceof HiveException);
-      Assert.assertTrue(expected.getMessage().contains("Pre-upgrade tool requires" +
-        " read-access to databases and tables to determine if a table has to be compacted."));
-    } finally {
-      Set perms = PosixFilePermissions.fromString("rwxrw----");
-      Files.setPosixFilePermissions(tablePath.toPath(), perms);
-    }
-  }
-
-  @Test
-  public void testConcurrency() throws Exception {
-    int numberOfTables = 20;
-    String tablePrefix = "concurrency_";
-
-    int[][] data = {{1, 2}, {3, 4}, {5, 6}, {7, 8}, {9, 10},
-      {11, 12}, {13, 14}, {15, 16}, {17, 18}, {19, 20}};
-    for (int i = 0; i < numberOfTables; i++) {
-      runStatementOnDriver("drop table if exists " + tablePrefix + i);
-    }
-
-    try {
-      for (int i = 0; i < numberOfTables; i++) {
-        String tableName = tablePrefix + i;
-        runStatementOnDriver(
-          "create table " + tableName + " (a int, b int) " +
-            "clustered by (b) " +
-            "into 10 buckets " +
-            "stored as orc TBLPROPERTIES ('transactional'='true')");
-        runStatementOnDriver("insert into " + tableName + makeValuesClause(data));
-      }
-
-      String[] args = {"-location", getTestDataDir(), "-execute"};
-      PreUpgradeTool.callback = new PreUpgradeTool.Callback() {
-        @Override
-        void onWaitForCompaction() throws MetaException {
-          runWorker(hiveConf);
-        }
-      };
-      PreUpgradeTool.pollIntervalMs = 1;
-      PreUpgradeTool.hiveConf = hiveConf;
-      PreUpgradeTool.main(args);
-
-    } finally {
-      for (int i = 0; i < numberOfTables; i++) {
-        runStatementOnDriver("drop table if exists " + tablePrefix + i);
-      }
-    }
-  }
-
-  private static void runWorker(HiveConf hiveConf) throws MetaException {
-    AtomicBoolean stop = new AtomicBoolean(true);
-    Worker t = new Worker();
-    t.setThreadId((int) t.getId());
-    t.setHiveConf(hiveConf);
-    AtomicBoolean looped = new AtomicBoolean();
-    t.init(stop, looped);
-    t.run();
-  }
-
-  private static String makeValuesClause(int[][] rows) {
-    assert rows.length > 0;
-    StringBuilder sb = new StringBuilder(" values");
-    for(int[] row : rows) {
-      assert row.length > 0;
-      if(row.length > 1) {
-        sb.append("(");
-      }
-      for(int value : row) {
-        sb.append(value).append(",");
-      }
-      sb.setLength(sb.length() - 1);//remove trailing comma
-      if(row.length > 1) {
-        sb.append(")");
-      }
-      sb.append(",");
-    }
-    sb.setLength(sb.length() - 1);//remove trailing comma
-    return sb.toString();
-  }
-
-  private List runStatementOnDriver(String stmt) throws Exception {
-    CommandProcessorResponse cpr = d.run(stmt);
-    if(cpr.getResponseCode() != 0) {
-      throw new RuntimeException(stmt + " failed: " + cpr);
-    }
-    List rs = new ArrayList();
-    d.getResults(rs);
-    return rs;
-  }
-  @Before
-  public void setUp() throws Exception {
-    setUpInternal();
-  }
-  private void initHiveConf() {
-    hiveConf = new HiveConf(this.getClass());
-  }
-  @Rule
-  public TestName testName = new TestName();
-  private HiveConf hiveConf;
-  private Driver d;
-  private void setUpInternal() throws Exception {
-    initHiveConf();
-    TxnDbUtil.cleanDb();//todo: api changed in 3.0
-    FileUtils.deleteDirectory(new File(getTestDataDir()));
-
-    Path workDir = new Path(System.getProperty("test.tmp.dir",
-      "target" + File.separator + "test" + File.separator + "tmp"));
-    hiveConf.set("mapred.local.dir", workDir + File.separator + this.getClass().getSimpleName()
-      + File.separator + "mapred" + File.separator + "local");
-    hiveConf.set("mapred.system.dir", workDir + File.separator + this.getClass().getSimpleName()
-      + File.separator + "mapred" + File.separator + "system");
-    hiveConf.set("mapreduce.jobtracker.staging.root.dir", workDir + File.separator + this.getClass().getSimpleName()
-      + File.separator + "mapred" + File.separator + "staging");
-    hiveConf.set("mapred.temp.dir", workDir + File.separator + this.getClass().getSimpleName()
-      + File.separator + "mapred" + File.separator + "temp");
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, getWarehouseDir());
-    hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName());
-    hiveConf
-      .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER,
-        "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");
-    hiveConf
-      .setVar(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS,
-        "org.apache.hadoop.hive.ql.security.authorization.AuthorizationPreEventListener");
-    hiveConf
-      .setVar(HiveConf.ConfVars.HIVE_METASTORE_AUTHORIZATION_MANAGER,
-        "org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider");
-    hiveConf.setBoolVar(HiveConf.ConfVars.MERGE_CARDINALITY_VIOLATION_CHECK, true);
-    hiveConf.setBoolVar(HiveConf.ConfVars.HIVESTATSCOLAUTOGATHER, false);
-    TxnDbUtil.setConfValues(hiveConf);
-    TxnDbUtil.prepDb();//todo: api changed in 3.0
-    File f = new File(getWarehouseDir());
-    if (f.exists()) {
-      FileUtil.fullyDelete(f);
-    }
-    if (!(new File(getWarehouseDir()).mkdirs())) {
-      throw new RuntimeException("Could not create " + getWarehouseDir());
-    }
-    SessionState ss = SessionState.start(hiveConf);
-    ss.applyAuthorizationPolicy();
-    d = new Driver(new QueryState(hiveConf), null);
-    d.setMaxRows(10000);
-  }
-  private String getWarehouseDir() {
-    return getTestDataDir() + "/warehouse";
-  }
-  @After
-  public void tearDown() throws Exception {
-    if (d != null) {
-      d.close();
-      d.destroy();
-      d = null;
-    }
-  }
-
-}
diff --git a/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestRunOptions.java b/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestRunOptions.java
deleted file mode 100644
index 8005b5cbc271..000000000000
--- a/upgrade-acid/pre-upgrade/src/test/java/org/apache/hadoop/hive/upgrade/acid/TestRunOptions.java
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.upgrade.acid;
-
-import static org.apache.hadoop.hive.upgrade.acid.PreUpgradeTool.createCommandLineOptions;
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-
-import org.apache.commons.cli.GnuParser;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-public class TestRunOptions {
-
-  @Rule
-  public ExpectedException expectedEx = ExpectedException.none();
-
-  @Test
-  public void testTablePoolSizeIs5WhenSpecified() throws Exception {
-    String[] args = {"-tablePoolSize", "5"};
-    RunOptions runOptions = RunOptions.fromCommandLine(new GnuParser().parse(createCommandLineOptions(), args));
-    assertThat(runOptions.getTablePoolSize(), is(5));
-  }
-
-  @Test
-  public void testExceptionIsThrownWhenTablePoolSizeIsNotANumber() throws Exception {
-    expectedEx.expect(IllegalArgumentException.class);
-    expectedEx.expectMessage("Please specify a positive integer option value for tablePoolSize");
-
-    String[] args = {"-tablePoolSize", "notANumber"};
-    RunOptions.fromCommandLine(new GnuParser().parse(createCommandLineOptions(), args));
-  }
-
-  @Test
-  public void testExceptionIsThrownWhenTablePoolSizeIsLessThan1() throws Exception {
-    expectedEx.expect(IllegalArgumentException.class);
-    expectedEx.expectMessage("Please specify a positive integer option value for tablePoolSize");
-
-    String[] args = {"-tablePoolSize", "0"};
-    RunOptions.fromCommandLine(new GnuParser().parse(createCommandLineOptions(), args));
-  }
-
-  @Test
-  public void testExceptionIsThrownWhenTablePoolSizeIsNotInteger() throws Exception {
-    expectedEx.expect(IllegalArgumentException.class);
-    expectedEx.expectMessage("Please specify a positive integer option value for tablePoolSize");
-
-    String[] args = {"-tablePoolSize", "0.5"};
-    RunOptions.fromCommandLine(new GnuParser().parse(createCommandLineOptions(), args));
-  }
-}
diff --git a/vector-code-gen/pom.xml b/vector-code-gen/pom.xml
index bf5f126ce354..fc44bdc598dc 100644
--- a/vector-code-gen/pom.xml
+++ b/vector-code-gen/pom.xml
@@ -17,7 +17,7 @@
   org.apache.hive
   hive
-  4.0.0-beta-2-SNAPSHOT
+  4.1.0-SNAPSHOT
  ../pom.xml
 hive-vector-code-gen